Diffstat (limited to 'compiler')
-rw-r--r--compiler/rustc/build.rs2
-rw-r--r--compiler/rustc_abi/src/layout.rs73
-rw-r--r--compiler/rustc_abi/src/lib.rs54
-rw-r--r--compiler/rustc_apfloat/Cargo.toml8
-rw-r--r--compiler/rustc_apfloat/src/ieee.rs2757
-rw-r--r--compiler/rustc_apfloat/src/lib.rs695
-rw-r--r--compiler/rustc_apfloat/src/ppc.rs434
-rw-r--r--compiler/rustc_apfloat/tests/ieee.rs3301
-rw-r--r--compiler/rustc_apfloat/tests/ppc.rs530
-rw-r--r--compiler/rustc_arena/src/lib.rs88
-rw-r--r--compiler/rustc_ast/src/ast.rs54
-rw-r--r--compiler/rustc_ast/src/attr/mod.rs51
-rw-r--r--compiler/rustc_ast/src/expand/allocator.rs22
-rw-r--r--compiler/rustc_ast/src/format.rs6
-rw-r--r--compiler/rustc_ast/src/mut_visit.rs15
-rw-r--r--compiler/rustc_ast/src/token.rs21
-rw-r--r--compiler/rustc_ast/src/tokenstream.rs134
-rw-r--r--compiler/rustc_ast/src/util/comments.rs2
-rw-r--r--compiler/rustc_ast/src/util/parser.rs2
-rw-r--r--compiler/rustc_ast/src/visit.rs13
-rw-r--r--compiler/rustc_ast_lowering/src/asm.rs8
-rw-r--r--compiler/rustc_ast_lowering/src/errors.rs17
-rw-r--r--compiler/rustc_ast_lowering/src/expr.rs18
-rw-r--r--compiler/rustc_ast_lowering/src/item.rs172
-rw-r--r--compiler/rustc_ast_lowering/src/lib.rs692
-rw-r--r--compiler/rustc_ast_lowering/src/lifetime_collector.rs25
-rw-r--r--compiler/rustc_ast_lowering/src/pat.rs3
-rw-r--r--compiler/rustc_ast_lowering/src/path.rs13
-rw-r--r--compiler/rustc_ast_passes/messages.ftl7
-rw-r--r--compiler/rustc_ast_passes/src/ast_validation.rs129
-rw-r--r--compiler/rustc_ast_passes/src/errors.rs30
-rw-r--r--compiler/rustc_ast_passes/src/feature_gate.rs30
-rw-r--r--compiler/rustc_ast_pretty/src/pprust/state.rs17
-rw-r--r--compiler/rustc_ast_pretty/src/pprust/state/expr.rs10
-rw-r--r--compiler/rustc_ast_pretty/src/pprust/state/item.rs35
-rw-r--r--compiler/rustc_attr/src/builtin.rs40
-rw-r--r--compiler/rustc_borrowck/src/borrowck_errors.rs46
-rw-r--r--compiler/rustc_borrowck/src/constraint_generation.rs10
-rw-r--r--compiler/rustc_borrowck/src/consumers.rs2
-rw-r--r--compiler/rustc_borrowck/src/dataflow.rs21
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs210
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs8
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/mod.rs38
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/move_errors.rs14
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs101
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/region_errors.rs75
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/region_name.rs49
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/var_name.rs4
-rw-r--r--compiler/rustc_borrowck/src/facts.rs2
-rw-r--r--compiler/rustc_borrowck/src/invalidation.rs3
-rw-r--r--compiler/rustc_borrowck/src/lib.rs35
-rw-r--r--compiler/rustc_borrowck/src/nll.rs6
-rw-r--r--compiler/rustc_borrowck/src/path_utils.rs14
-rw-r--r--compiler/rustc_borrowck/src/places_conflict.rs143
-rw-r--r--compiler/rustc_borrowck/src/region_infer/dump_mir.rs11
-rw-r--r--compiler/rustc_borrowck/src/region_infer/graphviz.rs4
-rw-r--r--compiler/rustc_borrowck/src/region_infer/mod.rs61
-rw-r--r--compiler/rustc_borrowck/src/region_infer/opaque_types.rs98
-rw-r--r--compiler/rustc_borrowck/src/region_infer/values.rs6
-rw-r--r--compiler/rustc_borrowck/src/renumber.rs8
-rw-r--r--compiler/rustc_borrowck/src/session_diagnostics.rs2
-rw-r--r--compiler/rustc_borrowck/src/type_check/canonical.rs6
-rw-r--r--compiler/rustc_borrowck/src/type_check/constraint_conversion.rs10
-rw-r--r--compiler/rustc_borrowck/src/type_check/liveness/polonius.rs6
-rw-r--r--compiler/rustc_borrowck/src/type_check/liveness/trace.rs8
-rw-r--r--compiler/rustc_borrowck/src/type_check/mod.rs158
-rw-r--r--compiler/rustc_borrowck/src/type_check/relate_tys.rs6
-rw-r--r--compiler/rustc_borrowck/src/universal_regions.rs161
-rw-r--r--compiler/rustc_builtin_macros/messages.ftl6
-rw-r--r--compiler/rustc_builtin_macros/src/asm.rs21
-rw-r--r--compiler/rustc_builtin_macros/src/assert.rs7
-rw-r--r--compiler/rustc_builtin_macros/src/assert/context.rs9
-rw-r--r--compiler/rustc_builtin_macros/src/cfg.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/cfg_eval.rs3
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/clone.rs8
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs4
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs4
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/decodable.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/encodable.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/generic/mod.rs27
-rw-r--r--compiler/rustc_builtin_macros/src/edition_panic.rs3
-rw-r--r--compiler/rustc_builtin_macros/src/env.rs46
-rw-r--r--compiler/rustc_builtin_macros/src/errors.rs52
-rw-r--r--compiler/rustc_builtin_macros/src/format.rs46
-rw-r--r--compiler/rustc_builtin_macros/src/format_foreign.rs17
-rw-r--r--compiler/rustc_builtin_macros/src/global_allocator.rs53
-rw-r--r--compiler/rustc_builtin_macros/src/proc_macro_harness.rs11
-rw-r--r--compiler/rustc_builtin_macros/src/source_util.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/standard_library_imports.rs31
-rw-r--r--compiler/rustc_builtin_macros/src/test.rs1
-rw-r--r--compiler/rustc_builtin_macros/src/test_harness.rs35
-rw-r--r--compiler/rustc_codegen_cranelift/.github/workflows/audit.yml19
-rw-r--r--compiler/rustc_codegen_cranelift/.github/workflows/main.yml10
-rw-r--r--compiler/rustc_codegen_cranelift/Cargo.lock84
-rw-r--r--compiler/rustc_codegen_cranelift/Cargo.toml14
-rw-r--r--compiler/rustc_codegen_cranelift/Readme.md10
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs10
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/bench.rs69
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/build_backend.rs18
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs41
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/main.rs5
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/path.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/prepare.rs15
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/shared_utils.rs26
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/tests.rs59
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/usage.txt2
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/utils.rs61
-rw-r--r--compiler/rustc_codegen_cranelift/config.txt1
-rw-r--r--compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/example/issue-59326.rs27
-rw-r--r--compiler/rustc_codegen_cranelift/example/mini_core.rs6
-rw-r--r--compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/example/std_example.rs32
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Allow-internal-features.patch24
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0027-coretests-128bit-atomic-operations.patch12
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0027-stdlib-128bit-atomic-operations.patch15
-rw-r--r--compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml31
-rw-r--r--compiler/rustc_codegen_cranelift/rust-toolchain2
-rw-r--r--compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs80
-rw-r--r--compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh5
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh14
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/comments.rs9
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/mod.rs29
-rw-r--r--compiler/rustc_codegen_cranelift/src/allocator.rs4
-rw-r--r--compiler/rustc_codegen_cranelift/src/base.rs30
-rw-r--r--compiler/rustc_codegen_cranelift/src/common.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/constant.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/jit.rs6
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/mod.rs10
-rw-r--r--compiler/rustc_codegen_cranelift/src/global_asm.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/inline_asm.rs4
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs20
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs4
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs278
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs45
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs58
-rw-r--r--compiler/rustc_codegen_cranelift/src/lib.rs9
-rw-r--r--compiler/rustc_codegen_cranelift/src/main_shim.rs6
-rw-r--r--compiler/rustc_codegen_cranelift/src/pretty_clif.rs6
-rw-r--r--compiler/rustc_codegen_cranelift/src/value_and_place.rs115
-rw-r--r--compiler/rustc_codegen_gcc/example/alloc_system.rs3
-rw-r--r--compiler/rustc_codegen_gcc/messages.ftl57
-rw-r--r--compiler/rustc_codegen_gcc/src/allocator.rs4
-rw-r--r--compiler/rustc_codegen_gcc/src/asm.rs9
-rw-r--r--compiler/rustc_codegen_gcc/src/base.rs4
-rw-r--r--compiler/rustc_codegen_gcc/src/builder.rs1
-rw-r--r--compiler/rustc_codegen_gcc/src/callee.rs6
-rw-r--r--compiler/rustc_codegen_gcc/src/common.rs8
-rw-r--r--compiler/rustc_codegen_gcc/src/context.rs2
-rw-r--r--compiler/rustc_codegen_gcc/src/errors.rs198
-rw-r--r--compiler/rustc_codegen_gcc/src/intrinsic/mod.rs29
-rw-r--r--compiler/rustc_codegen_gcc/src/intrinsic/simd.rs536
-rw-r--r--compiler/rustc_codegen_gcc/src/lib.rs12
-rw-r--r--compiler/rustc_codegen_gcc/src/mono_item.rs2
-rw-r--r--compiler/rustc_codegen_gcc/src/type_.rs26
-rw-r--r--compiler/rustc_codegen_gcc/src/type_of.rs4
-rw-r--r--compiler/rustc_codegen_llvm/Cargo.toml2
-rw-r--r--compiler/rustc_codegen_llvm/messages.ftl3
-rw-r--r--compiler/rustc_codegen_llvm/src/abi.rs26
-rw-r--r--compiler/rustc_codegen_llvm/src/allocator.rs188
-rw-r--r--compiler/rustc_codegen_llvm/src/asm.rs13
-rw-r--r--compiler/rustc_codegen_llvm/src/attributes.rs73
-rw-r--r--compiler/rustc_codegen_llvm/src/back/archive.rs53
-rw-r--r--compiler/rustc_codegen_llvm/src/back/lto.rs16
-rw-r--r--compiler/rustc_codegen_llvm/src/back/write.rs57
-rw-r--r--compiler/rustc_codegen_llvm/src/base.rs7
-rw-r--r--compiler/rustc_codegen_llvm/src/builder.rs93
-rw-r--r--compiler/rustc_codegen_llvm/src/callee.rs42
-rw-r--r--compiler/rustc_codegen_llvm/src/common.rs26
-rw-r--r--compiler/rustc_codegen_llvm/src/consts.rs23
-rw-r--r--compiler/rustc_codegen_llvm/src/context.rs90
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs204
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs89
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs47
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs87
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs2
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs7
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs60
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs18
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs13
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs12
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/mod.rs18
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/utils.rs3
-rw-r--r--compiler/rustc_codegen_llvm/src/errors.rs2
-rw-r--r--compiler/rustc_codegen_llvm/src/intrinsic.rs341
-rw-r--r--compiler/rustc_codegen_llvm/src/lib.rs77
-rw-r--r--compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs2
-rw-r--r--compiler/rustc_codegen_llvm/src/llvm/ffi.rs234
-rw-r--r--compiler/rustc_codegen_llvm/src/llvm_util.rs57
-rw-r--r--compiler/rustc_codegen_llvm/src/mono_item.rs4
-rw-r--r--compiler/rustc_codegen_llvm/src/type_.rs27
-rw-r--r--compiler/rustc_codegen_llvm/src/type_of.rs46
-rw-r--r--compiler/rustc_codegen_llvm/src/va_arg.rs35
-rw-r--r--compiler/rustc_codegen_ssa/Cargo.toml10
-rw-r--r--compiler/rustc_codegen_ssa/messages.ftl2
-rw-r--r--compiler/rustc_codegen_ssa/src/back/link.rs92
-rw-r--r--compiler/rustc_codegen_ssa/src/back/linker.rs48
-rw-r--r--compiler/rustc_codegen_ssa/src/back/metadata.rs96
-rw-r--r--compiler/rustc_codegen_ssa/src/back/rpath.rs38
-rw-r--r--compiler/rustc_codegen_ssa/src/back/rpath/tests.rs35
-rw-r--r--compiler/rustc_codegen_ssa/src/back/symbol_export.rs59
-rw-r--r--compiler/rustc_codegen_ssa/src/back/write.rs359
-rw-r--r--compiler/rustc_codegen_ssa/src/base.rs103
-rw-r--r--compiler/rustc_codegen_ssa/src/codegen_attrs.rs36
-rw-r--r--compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs60
-rw-r--r--compiler/rustc_codegen_ssa/src/errors.rs28
-rw-r--r--compiler/rustc_codegen_ssa/src/lib.rs2
-rw-r--r--compiler/rustc_codegen_ssa/src/meth.rs6
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/block.rs114
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/debuginfo.rs98
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/intrinsic.rs52
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/mod.rs2
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/operand.rs33
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/place.rs53
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/rvalue.rs39
-rw-r--r--compiler/rustc_codegen_ssa/src/mono_item.rs8
-rw-r--r--compiler/rustc_codegen_ssa/src/target_features.rs62
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/backend.rs29
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/consts.rs7
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/mod.rs4
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/type_.rs12
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/write.rs5
-rw-r--r--compiler/rustc_const_eval/Cargo.toml2
-rw-r--r--compiler/rustc_const_eval/messages.ftl120
-rw-r--r--compiler/rustc_const_eval/src/const_eval/error.rs9
-rw-r--r--compiler/rustc_const_eval/src/const_eval/eval_queries.rs18
-rw-r--r--compiler/rustc_const_eval/src/const_eval/fn_queries.rs15
-rw-r--r--compiler/rustc_const_eval/src/const_eval/machine.rs71
-rw-r--r--compiler/rustc_const_eval/src/const_eval/mod.rs10
-rw-r--r--compiler/rustc_const_eval/src/const_eval/valtrees.rs202
-rw-r--r--compiler/rustc_const_eval/src/errors.rs178
-rw-r--r--compiler/rustc_const_eval/src/interpret/cast.rs14
-rw-r--r--compiler/rustc_const_eval/src/interpret/discriminant.rs110
-rw-r--r--compiler/rustc_const_eval/src/interpret/eval_context.rs42
-rw-r--r--compiler/rustc_const_eval/src/interpret/intern.rs135
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics.rs88
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs8
-rw-r--r--compiler/rustc_const_eval/src/interpret/machine.rs30
-rw-r--r--compiler/rustc_const_eval/src/interpret/memory.rs35
-rw-r--r--compiler/rustc_const_eval/src/interpret/mod.rs8
-rw-r--r--compiler/rustc_const_eval/src/interpret/operand.rs276
-rw-r--r--compiler/rustc_const_eval/src/interpret/operator.rs9
-rw-r--r--compiler/rustc_const_eval/src/interpret/place.rs566
-rw-r--r--compiler/rustc_const_eval/src/interpret/projection.rs417
-rw-r--r--compiler/rustc_const_eval/src/interpret/step.rs32
-rw-r--r--compiler/rustc_const_eval/src/interpret/terminator.rs252
-rw-r--r--compiler/rustc_const_eval/src/interpret/util.rs8
-rw-r--r--compiler/rustc_const_eval/src/interpret/validity.rs234
-rw-r--r--compiler/rustc_const_eval/src/interpret/visitor.rs692
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/check.rs71
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/mod.rs19
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/ops.rs29
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs6
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs30
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/resolver.rs13
-rw-r--r--compiler/rustc_const_eval/src/transform/promote_consts.rs16
-rw-r--r--compiler/rustc_const_eval/src/transform/validate.rs627
-rw-r--r--compiler/rustc_const_eval/src/util/compare_types.rs14
-rw-r--r--compiler/rustc_const_eval/src/util/type_name.rs13
-rw-r--r--compiler/rustc_data_structures/Cargo.toml2
-rw-r--r--compiler/rustc_data_structures/src/base_n.rs10
-rw-r--r--compiler/rustc_data_structures/src/binary_search_util/mod.rs38
-rw-r--r--compiler/rustc_data_structures/src/graph/dominators/mod.rs4
-rw-r--r--compiler/rustc_data_structures/src/lib.rs1
-rw-r--r--compiler/rustc_data_structures/src/sorted_map.rs5
-rw-r--r--compiler/rustc_data_structures/src/sso/map.rs6
-rw-r--r--compiler/rustc_data_structures/src/sync/vec.rs28
-rw-r--r--compiler/rustc_data_structures/src/sync/worker_local.rs2
-rw-r--r--compiler/rustc_data_structures/src/unord.rs73
-rw-r--r--compiler/rustc_driver/Cargo.toml3
-rw-r--r--compiler/rustc_driver_impl/Cargo.toml1
-rw-r--r--compiler/rustc_driver_impl/messages.ftl6
-rw-r--r--compiler/rustc_driver_impl/src/lib.rs287
-rw-r--r--compiler/rustc_driver_impl/src/pretty.rs13
-rw-r--r--compiler/rustc_driver_impl/src/session_diagnostics.rs30
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0092.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0093.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0094.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0132.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0152.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0208.md1
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0211.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0230.md1
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0231.md1
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0232.md1
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0264.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0391.md3
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0439.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0539.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0542.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0543.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0544.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0545.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0546.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0547.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0549.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0577.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0622.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0691.md3
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0773.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0789.md1
-rw-r--r--compiler/rustc_error_messages/src/lib.rs26
-rw-r--r--compiler/rustc_errors/Cargo.toml1
-rw-r--r--compiler/rustc_errors/messages.ftl22
-rw-r--r--compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs20
-rw-r--r--compiler/rustc_errors/src/diagnostic.rs10
-rw-r--r--compiler/rustc_errors/src/diagnostic_builder.rs4
-rw-r--r--compiler/rustc_errors/src/diagnostic_impls.rs69
-rw-r--r--compiler/rustc_errors/src/emitter.rs271
-rw-r--r--compiler/rustc_errors/src/json.rs40
-rw-r--r--compiler/rustc_errors/src/json/tests.rs2
-rw-r--r--compiler/rustc_errors/src/lib.rs182
-rw-r--r--compiler/rustc_errors/src/markdown/parse.rs2
-rw-r--r--compiler/rustc_errors/src/markdown/term.rs2
-rw-r--r--compiler/rustc_errors/src/markdown/tests/term.rs2
-rw-r--r--compiler/rustc_expand/Cargo.toml1
-rw-r--r--compiler/rustc_expand/src/base.rs9
-rw-r--r--compiler/rustc_expand/src/build.rs11
-rw-r--r--compiler/rustc_expand/src/config.rs23
-rw-r--r--compiler/rustc_expand/src/expand.rs35
-rw-r--r--compiler/rustc_expand/src/lib.rs1
-rw-r--r--compiler/rustc_expand/src/mbe/diagnostics.rs5
-rw-r--r--compiler/rustc_expand/src/mbe/macro_check.rs4
-rw-r--r--compiler/rustc_expand/src/mbe/macro_parser.rs16
-rw-r--r--compiler/rustc_expand/src/mbe/macro_rules.rs56
-rw-r--r--compiler/rustc_expand/src/mbe/metavar_expr.rs23
-rw-r--r--compiler/rustc_expand/src/mbe/quoted.rs44
-rw-r--r--compiler/rustc_expand/src/mbe/transcribe.rs15
-rw-r--r--compiler/rustc_expand/src/parse/tests.rs21
-rw-r--r--compiler/rustc_expand/src/placeholders.rs3
-rw-r--r--compiler/rustc_expand/src/proc_macro.rs4
-rw-r--r--compiler/rustc_expand/src/proc_macro_server.rs6
-rw-r--r--compiler/rustc_expand/src/tests.rs76
-rw-r--r--compiler/rustc_feature/src/accepted.rs2
-rw-r--r--compiler/rustc_feature/src/active.rs91
-rw-r--r--compiler/rustc_feature/src/builtin_attrs.rs9
-rw-r--r--compiler/rustc_feature/src/lib.rs2
-rw-r--r--compiler/rustc_hir/src/def.rs17
-rw-r--r--compiler/rustc_hir/src/hir.rs72
-rw-r--r--compiler/rustc_hir/src/intravisit.rs11
-rw-r--r--compiler/rustc_hir/src/lib.rs1
-rw-r--r--compiler/rustc_hir/src/target.rs13
-rw-r--r--compiler/rustc_hir_analysis/messages.ftl3
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/bounds.rs45
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/errors.rs22
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/generics.rs50
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/lint.rs6
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/mod.rs319
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/object_safety.rs33
-rw-r--r--compiler/rustc_hir_analysis/src/autoderef.rs2
-rw-r--r--compiler/rustc_hir_analysis/src/bounds.rs3
-rw-r--r--compiler/rustc_hir_analysis/src/check/check.rs215
-rw-r--r--compiler/rustc_hir_analysis/src/check/compare_impl_item.rs396
-rw-r--r--compiler/rustc_hir_analysis/src/check/dropck.rs26
-rw-r--r--compiler/rustc_hir_analysis/src/check/entry.rs277
-rw-r--r--compiler/rustc_hir_analysis/src/check/intrinsic.rs34
-rw-r--r--compiler/rustc_hir_analysis/src/check/intrinsicck.rs18
-rw-r--r--compiler/rustc_hir_analysis/src/check/mod.rs25
-rw-r--r--compiler/rustc_hir_analysis/src/check/wfcheck.rs257
-rw-r--r--compiler/rustc_hir_analysis/src/check_unused.rs17
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/builtin.rs41
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs5
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/inherent_impls_overlap.rs8
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/mod.rs2
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/orphan.rs53
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/unsafety.rs2
-rw-r--r--compiler/rustc_hir_analysis/src/collect.rs139
-rw-r--r--compiler/rustc_hir_analysis/src/collect/generics_of.rs17
-rw-r--r--compiler/rustc_hir_analysis/src/collect/item_bounds.rs64
-rw-r--r--compiler/rustc_hir_analysis/src/collect/predicates_of.rs145
-rw-r--r--compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs194
-rw-r--r--compiler/rustc_hir_analysis/src/collect/type_of.rs41
-rw-r--r--compiler/rustc_hir_analysis/src/constrained_generic_params.rs2
-rw-r--r--compiler/rustc_hir_analysis/src/errors.rs11
-rw-r--r--compiler/rustc_hir_analysis/src/hir_wf_check.rs2
-rw-r--r--compiler/rustc_hir_analysis/src/impl_wf_check.rs30
-rw-r--r--compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs126
-rw-r--r--compiler/rustc_hir_analysis/src/lib.rs285
-rw-r--r--compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs31
-rw-r--r--compiler/rustc_hir_analysis/src/outlives/mod.rs2
-rw-r--r--compiler/rustc_hir_analysis/src/outlives/utils.rs2
-rw-r--r--compiler/rustc_hir_analysis/src/structured_errors/wrong_number_of_generic_args.rs70
-rw-r--r--compiler/rustc_hir_analysis/src/variance/constraints.rs65
-rw-r--r--compiler/rustc_hir_analysis/src/variance/mod.rs46
-rw-r--r--compiler/rustc_hir_analysis/src/variance/solve.rs2
-rw-r--r--compiler/rustc_hir_analysis/src/variance/terms.rs11
-rw-r--r--compiler/rustc_hir_pretty/src/lib.rs21
-rw-r--r--compiler/rustc_hir_typeck/Cargo.toml1
-rw-r--r--compiler/rustc_hir_typeck/messages.ftl4
-rw-r--r--compiler/rustc_hir_typeck/src/_match.rs74
-rw-r--r--compiler/rustc_hir_typeck/src/callee.rs121
-rw-r--r--compiler/rustc_hir_typeck/src/cast.rs48
-rw-r--r--compiler/rustc_hir_typeck/src/check.rs4
-rw-r--r--compiler/rustc_hir_typeck/src/closure.rs52
-rw-r--r--compiler/rustc_hir_typeck/src/coercion.rs167
-rw-r--r--compiler/rustc_hir_typeck/src/demand.rs293
-rw-r--r--compiler/rustc_hir_typeck/src/errors.rs40
-rw-r--r--compiler/rustc_hir_typeck/src/expectation.rs9
-rw-r--r--compiler/rustc_hir_typeck/src/expr.rs171
-rw-r--r--compiler/rustc_hir_typeck/src/expr_use_visitor.rs6
-rw-r--r--compiler/rustc_hir_typeck/src/fallback.rs32
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs159
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs129
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs156
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs10
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs320
-rw-r--r--compiler/rustc_hir_typeck/src/gather_locals.rs26
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs10
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs14
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs15
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/mod.rs2
-rw-r--r--compiler/rustc_hir_typeck/src/inherited.rs6
-rw-r--r--compiler/rustc_hir_typeck/src/intrinsicck.rs17
-rw-r--r--compiler/rustc_hir_typeck/src/lib.rs21
-rw-r--r--compiler/rustc_hir_typeck/src/mem_categorization.rs93
-rw-r--r--compiler/rustc_hir_typeck/src/method/confirm.rs108
-rw-r--r--compiler/rustc_hir_typeck/src/method/mod.rs44
-rw-r--r--compiler/rustc_hir_typeck/src/method/prelude2021.rs45
-rw-r--r--compiler/rustc_hir_typeck/src/method/probe.rs89
-rw-r--r--compiler/rustc_hir_typeck/src/method/suggest.rs305
-rw-r--r--compiler/rustc_hir_typeck/src/op.rs167
-rw-r--r--compiler/rustc_hir_typeck/src/pat.rs264
-rw-r--r--compiler/rustc_hir_typeck/src/place_op.rs4
-rw-r--r--compiler/rustc_hir_typeck/src/rvalue_scopes.rs6
-rw-r--r--compiler/rustc_hir_typeck/src/upvar.rs124
-rw-r--r--compiler/rustc_hir_typeck/src/writeback.rs206
-rw-r--r--compiler/rustc_incremental/src/assert_dep_graph.rs10
-rw-r--r--compiler/rustc_incremental/src/assert_module_sources.rs1
-rw-r--r--compiler/rustc_incremental/src/persist/dirty_clean.rs2
-rw-r--r--compiler/rustc_incremental/src/persist/fs.rs42
-rw-r--r--compiler/rustc_incremental/src/persist/load.rs4
-rw-r--r--compiler/rustc_incremental/src/persist/save.rs8
-rw-r--r--compiler/rustc_index/src/bit_set.rs4
-rw-r--r--compiler/rustc_index/src/lib.rs1
-rw-r--r--compiler/rustc_infer/src/errors/mod.rs19
-rw-r--r--compiler/rustc_infer/src/errors/note_and_explain.rs2
-rw-r--r--compiler/rustc_infer/src/infer/at.rs28
-rw-r--r--compiler/rustc_infer/src/infer/canonical/canonicalizer.rs14
-rw-r--r--compiler/rustc_infer/src/infer/canonical/mod.rs4
-rw-r--r--compiler/rustc_infer/src/infer/canonical/query_response.rs6
-rw-r--r--compiler/rustc_infer/src/infer/canonical/substitute.rs2
-rw-r--r--compiler/rustc_infer/src/infer/combine.rs8
-rw-r--r--compiler/rustc_infer/src/infer/equate.rs12
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/mod.rs245
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs125
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs10
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs22
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs24
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs37
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs6
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/note.rs74
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs42
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/suggest.rs141
-rw-r--r--compiler/rustc_infer/src/infer/freshen.rs2
-rw-r--r--compiler/rustc_infer/src/infer/generalize.rs22
-rw-r--r--compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs9
-rw-r--r--compiler/rustc_infer/src/infer/mod.rs84
-rw-r--r--compiler/rustc_infer/src/infer/nll_relate/mod.rs2
-rw-r--r--compiler/rustc_infer/src/infer/opaque_types.rs54
-rw-r--r--compiler/rustc_infer/src/infer/outlives/components.rs26
-rw-r--r--compiler/rustc_infer/src/infer/outlives/obligations.rs16
-rw-r--r--compiler/rustc_infer/src/infer/outlives/test_type_match.rs2
-rw-r--r--compiler/rustc_infer/src/infer/outlives/verify.rs4
-rw-r--r--compiler/rustc_infer/src/infer/region_constraints/leak_check.rs8
-rw-r--r--compiler/rustc_infer/src/infer/region_constraints/mod.rs8
-rw-r--r--compiler/rustc_infer/src/infer/type_variable.rs2
-rw-r--r--compiler/rustc_infer/src/traits/engine.rs2
-rw-r--r--compiler/rustc_infer/src/traits/error_reporting/mod.rs6
-rw-r--r--compiler/rustc_infer/src/traits/mod.rs40
-rw-r--r--compiler/rustc_infer/src/traits/project.rs8
-rw-r--r--compiler/rustc_infer/src/traits/structural_impls.rs10
-rw-r--r--compiler/rustc_infer/src/traits/util.rs13
-rw-r--r--compiler/rustc_interface/Cargo.toml2
-rw-r--r--compiler/rustc_interface/src/interface.rs23
-rw-r--r--compiler/rustc_interface/src/passes.rs100
-rw-r--r--compiler/rustc_interface/src/queries.rs150
-rw-r--r--compiler/rustc_interface/src/tests.rs3
-rw-r--r--compiler/rustc_interface/src/util.rs5
-rw-r--r--compiler/rustc_lexer/Cargo.toml6
-rw-r--r--compiler/rustc_lexer/src/cursor.rs4
-rw-r--r--compiler/rustc_lexer/src/lib.rs18
-rw-r--r--compiler/rustc_lexer/src/unescape.rs2
-rw-r--r--compiler/rustc_lint/messages.ftl27
-rw-r--r--compiler/rustc_lint/src/array_into_iter.rs2
-rw-r--r--compiler/rustc_lint/src/builtin.rs704
-rw-r--r--compiler/rustc_lint/src/context.rs54
-rw-r--r--compiler/rustc_lint/src/deref_into_dyn_supertrait.rs2
-rw-r--r--compiler/rustc_lint/src/early.rs3
-rw-r--r--compiler/rustc_lint/src/enum_intrinsics_non_enums.rs4
-rw-r--r--compiler/rustc_lint/src/for_loops_over_fallibles.rs12
-rw-r--r--compiler/rustc_lint/src/foreign_modules.rs402
-rw-r--r--compiler/rustc_lint/src/internal.rs24
-rw-r--r--compiler/rustc_lint/src/late.rs48
-rw-r--r--compiler/rustc_lint/src/levels.rs47
-rw-r--r--compiler/rustc_lint/src/lib.rs59
-rw-r--r--compiler/rustc_lint/src/lints.rs68
-rw-r--r--compiler/rustc_lint/src/methods.rs4
-rw-r--r--compiler/rustc_lint/src/multiple_supertrait_upcastable.rs1
-rw-r--r--compiler/rustc_lint/src/non_ascii_idents.rs1
-rw-r--r--compiler/rustc_lint/src/noop_method_call.rs25
-rw-r--r--compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs21
-rw-r--r--compiler/rustc_lint/src/pass_by_value.rs4
-rw-r--r--compiler/rustc_lint/src/ptr_nulls.rs146
-rw-r--r--compiler/rustc_lint/src/reference_casting.rs144
-rw-r--r--compiler/rustc_lint/src/traits.rs14
-rw-r--r--compiler/rustc_lint/src/types.rs121
-rw-r--r--compiler/rustc_lint/src/unused.rs71
-rw-r--r--compiler/rustc_lint_defs/src/builtin.rs157
-rw-r--r--compiler/rustc_lint_defs/src/lib.rs22
-rw-r--r--compiler/rustc_llvm/build.rs6
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp26
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h3
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp76
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp108
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/SymbolWrapper.cpp1
-rw-r--r--compiler/rustc_llvm/src/lib.rs8
-rw-r--r--compiler/rustc_log/src/lib.rs2
-rw-r--r--compiler/rustc_macros/Cargo.toml3
-rw-r--r--compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs14
-rw-r--r--compiler/rustc_macros/src/diagnostics/subdiagnostic.rs4
-rw-r--r--compiler/rustc_macros/src/lib.rs1
-rw-r--r--compiler/rustc_macros/src/newtype.rs6
-rw-r--r--compiler/rustc_macros/src/serialize.rs6
-rw-r--r--compiler/rustc_metadata/messages.ftl3
-rw-r--r--compiler/rustc_metadata/src/creader.rs34
-rw-r--r--compiler/rustc_metadata/src/dependency_format.rs3
-rw-r--r--compiler/rustc_metadata/src/errors.rs16
-rw-r--r--compiler/rustc_metadata/src/foreign_modules.rs17
-rw-r--r--compiler/rustc_metadata/src/fs.rs6
-rw-r--r--compiler/rustc_metadata/src/locator.rs74
-rw-r--r--compiler/rustc_metadata/src/native_libs.rs99
-rw-r--r--compiler/rustc_metadata/src/rmeta/decoder.rs49
-rw-r--r--compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs14
-rw-r--r--compiler/rustc_metadata/src/rmeta/encoder.rs133
-rw-r--r--compiler/rustc_metadata/src/rmeta/mod.rs11
-rw-r--r--compiler/rustc_metadata/src/rmeta/table.rs10
-rw-r--r--compiler/rustc_middle/Cargo.toml2
-rw-r--r--compiler/rustc_middle/messages.ftl3
-rw-r--r--compiler/rustc_middle/src/arena.rs2
-rw-r--r--compiler/rustc_middle/src/dep_graph/dep_node.rs56
-rw-r--r--compiler/rustc_middle/src/dep_graph/mod.rs4
-rw-r--r--compiler/rustc_middle/src/error.rs3
-rw-r--r--compiler/rustc_middle/src/hir/map/mod.rs82
-rw-r--r--compiler/rustc_middle/src/hir/mod.rs31
-rw-r--r--compiler/rustc_middle/src/hir/place.rs4
-rw-r--r--compiler/rustc_middle/src/infer/canonical.rs6
-rw-r--r--compiler/rustc_middle/src/infer/mod.rs2
-rw-r--r--compiler/rustc_middle/src/lib.rs2
-rw-r--r--compiler/rustc_middle/src/lint.rs51
-rw-r--r--compiler/rustc_middle/src/macros.rs4
-rw-r--r--compiler/rustc_middle/src/middle/codegen_fn_attrs.rs2
-rw-r--r--compiler/rustc_middle/src/middle/exported_symbols.rs12
-rw-r--r--compiler/rustc_middle/src/middle/privacy.rs9
-rw-r--r--compiler/rustc_middle/src/middle/region.rs4
-rw-r--r--compiler/rustc_middle/src/middle/stability.rs14
-rw-r--r--compiler/rustc_middle/src/mir/basic_blocks.rs4
-rw-r--r--compiler/rustc_middle/src/mir/coverage.rs129
-rw-r--r--compiler/rustc_middle/src/mir/generic_graph.rs4
-rw-r--r--compiler/rustc_middle/src/mir/generic_graphviz.rs10
-rw-r--r--compiler/rustc_middle/src/mir/graphviz.rs2
-rw-r--r--compiler/rustc_middle/src/mir/interpret/allocation.rs71
-rw-r--r--compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs6
-rw-r--r--compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs14
-rw-r--r--compiler/rustc_middle/src/mir/interpret/error.rs63
-rw-r--r--compiler/rustc_middle/src/mir/interpret/mod.rs22
-rw-r--r--compiler/rustc_middle/src/mir/interpret/queries.rs29
-rw-r--r--compiler/rustc_middle/src/mir/interpret/value.rs29
-rw-r--r--compiler/rustc_middle/src/mir/mod.rs221
-rw-r--r--compiler/rustc_middle/src/mir/mono.rs79
-rw-r--r--compiler/rustc_middle/src/mir/pretty.rs106
-rw-r--r--compiler/rustc_middle/src/mir/query.rs10
-rw-r--r--compiler/rustc_middle/src/mir/spanview.rs17
-rw-r--r--compiler/rustc_middle/src/mir/syntax.rs22
-rw-r--r--compiler/rustc_middle/src/mir/tcx.rs12
-rw-r--r--compiler/rustc_middle/src/mir/terminator.rs132
-rw-r--r--compiler/rustc_middle/src/mir/visit.rs29
-rw-r--r--compiler/rustc_middle/src/query/erase.rs1
-rw-r--r--compiler/rustc_middle/src/query/keys.rs47
-rw-r--r--compiler/rustc_middle/src/query/mod.rs100
-rw-r--r--compiler/rustc_middle/src/query/on_disk_cache.rs6
-rw-r--r--compiler/rustc_middle/src/query/plumbing.rs22
-rw-r--r--compiler/rustc_middle/src/thir.rs39
-rw-r--r--compiler/rustc_middle/src/thir/visit.rs12
-rw-r--r--compiler/rustc_middle/src/traits/mod.rs124
-rw-r--r--compiler/rustc_middle/src/traits/query.rs4
-rw-r--r--compiler/rustc_middle/src/traits/select.rs5
-rw-r--r--compiler/rustc_middle/src/traits/solve.rs9
-rw-r--r--compiler/rustc_middle/src/traits/solve/cache.rs100
-rw-r--r--compiler/rustc_middle/src/traits/solve/inspect.rs15
-rw-r--r--compiler/rustc_middle/src/traits/solve/inspect/format.rs122
-rw-r--r--compiler/rustc_middle/src/traits/specialization_graph.rs6
-rw-r--r--compiler/rustc_middle/src/traits/structural_impls.rs40
-rw-r--r--compiler/rustc_middle/src/ty/abstract_const.rs8
-rw-r--r--compiler/rustc_middle/src/ty/adt.rs18
-rw-r--r--compiler/rustc_middle/src/ty/assoc.rs12
-rw-r--r--compiler/rustc_middle/src/ty/binding.rs2
-rw-r--r--compiler/rustc_middle/src/ty/closure.rs31
-rw-r--r--compiler/rustc_middle/src/ty/codec.rs12
-rw-r--r--compiler/rustc_middle/src/ty/consts.rs30
-rw-r--r--compiler/rustc_middle/src/ty/consts/int.rs4
-rw-r--r--compiler/rustc_middle/src/ty/consts/kind.rs16
-rw-r--r--compiler/rustc_middle/src/ty/consts/valtree.rs2
-rw-r--r--compiler/rustc_middle/src/ty/context.rs254
-rw-r--r--compiler/rustc_middle/src/ty/diagnostics.rs39
-rw-r--r--compiler/rustc_middle/src/ty/error.rs17
-rw-r--r--compiler/rustc_middle/src/ty/fast_reject.rs133
-rw-r--r--compiler/rustc_middle/src/ty/flags.rs62
-rw-r--r--compiler/rustc_middle/src/ty/generic_args.rs (renamed from compiler/rustc_middle/src/ty/subst.rs)288
-rw-r--r--compiler/rustc_middle/src/ty/generics.rs44
-rw-r--r--compiler/rustc_middle/src/ty/impls_ty.rs2
-rw-r--r--compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs24
-rw-r--r--compiler/rustc_middle/src/ty/inhabitedness/mod.rs10
-rw-r--r--compiler/rustc_middle/src/ty/instance.rs174
-rw-r--r--compiler/rustc_middle/src/ty/layout.rs44
-rw-r--r--compiler/rustc_middle/src/ty/list.rs11
-rw-r--r--compiler/rustc_middle/src/ty/mod.rs290
-rw-r--r--compiler/rustc_middle/src/ty/normalize_erasing_regions.rs22
-rw-r--r--compiler/rustc_middle/src/ty/opaque_types.rs44
-rw-r--r--compiler/rustc_middle/src/ty/parameterized.rs1
-rw-r--r--compiler/rustc_middle/src/ty/print/mod.rs46
-rw-r--r--compiler/rustc_middle/src/ty/print/pretty.rs216
-rw-r--r--compiler/rustc_middle/src/ty/relate.rs166
-rw-r--r--compiler/rustc_middle/src/ty/rvalue_scopes.rs4
-rw-r--r--compiler/rustc_middle/src/ty/structural_impls.rs244
-rw-r--r--compiler/rustc_middle/src/ty/sty.rs438
-rw-r--r--compiler/rustc_middle/src/ty/trait_def.rs2
-rw-r--r--compiler/rustc_middle/src/ty/typeck_results.rs43
-rw-r--r--compiler/rustc_middle/src/ty/util.rs113
-rw-r--r--compiler/rustc_middle/src/ty/visit.rs8
-rw-r--r--compiler/rustc_middle/src/ty/vtable.rs4
-rw-r--r--compiler/rustc_middle/src/ty/walk.rs28
-rw-r--r--compiler/rustc_middle/src/util/bug.rs2
-rw-r--r--compiler/rustc_middle/src/util/call_kind.rs26
-rw-r--r--compiler/rustc_middle/src/util/common.rs2
-rw-r--r--compiler/rustc_middle/src/util/find_self_call.rs10
-rw-r--r--compiler/rustc_middle/src/values.rs2
-rw-r--r--compiler/rustc_mir_build/Cargo.toml2
-rw-r--r--compiler/rustc_mir_build/messages.ftl2
-rw-r--r--compiler/rustc_mir_build/src/build/custom/parse.rs6
-rw-r--r--compiler/rustc_mir_build/src/build/custom/parse/instruction.rs31
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_constant.rs8
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_place.rs17
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_rvalue.rs21
-rw-r--r--compiler/rustc_mir_build/src/build/expr/into.rs6
-rw-r--r--compiler/rustc_mir_build/src/build/matches/mod.rs16
-rw-r--r--compiler/rustc_mir_build/src/build/matches/simplify.rs4
-rw-r--r--compiler/rustc_mir_build/src/build/matches/test.rs18
-rw-r--r--compiler/rustc_mir_build/src/build/mod.rs45
-rw-r--r--compiler/rustc_mir_build/src/build/scope.rs73
-rw-r--r--compiler/rustc_mir_build/src/check_unsafety.rs129
-rw-r--r--compiler/rustc_mir_build/src/errors.rs59
-rw-r--r--compiler/rustc_mir_build/src/lib.rs2
-rw-r--r--compiler/rustc_mir_build/src/lints.rs139
-rw-r--r--compiler/rustc_mir_build/src/thir/cx/expr.rs106
-rw-r--r--compiler/rustc_mir_build/src/thir/cx/mod.rs16
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/check_match.rs41
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs17
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs64
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/mod.rs40
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/usefulness.rs4
-rw-r--r--compiler/rustc_mir_build/src/thir/print.rs20
-rw-r--r--compiler/rustc_mir_build/src/thir/util.rs2
-rw-r--r--compiler/rustc_mir_dataflow/src/elaborate_drops.rs48
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/direction.rs173
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/engine.rs41
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/fmt.rs32
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/graphviz.rs6
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/lattice.rs94
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/mod.rs111
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/tests.rs7
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs65
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/initialized.rs778
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/liveness.rs92
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/mod.rs757
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs83
-rw-r--r--compiler/rustc_mir_dataflow/src/lib.rs5
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/builder.rs125
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/mod.rs72
-rw-r--r--compiler/rustc_mir_dataflow/src/rustc_peek.rs6
-rw-r--r--compiler/rustc_mir_dataflow/src/un_derefer.rs100
-rw-r--r--compiler/rustc_mir_dataflow/src/value_analysis.rs73
-rw-r--r--compiler/rustc_mir_transform/Cargo.toml1
-rw-r--r--compiler/rustc_mir_transform/src/add_retag.rs2
-rw-r--r--compiler/rustc_mir_transform/src/check_unsafety.rs2
-rw-r--r--compiler/rustc_mir_transform/src/const_prop.rs229
-rw-r--r--compiler/rustc_mir_transform/src/const_prop_lint.rs14
-rw-r--r--compiler/rustc_mir_transform/src/copy_prop.rs10
-rw-r--r--compiler/rustc_mir_transform/src/coverage/counters.rs354
-rw-r--r--compiler/rustc_mir_transform/src/coverage/debug.rs115
-rw-r--r--compiler/rustc_mir_transform/src/coverage/graph.rs115
-rw-r--r--compiler/rustc_mir_transform/src/coverage/mod.rs182
-rw-r--r--compiler/rustc_mir_transform/src/coverage/query.rs57
-rw-r--r--compiler/rustc_mir_transform/src/coverage/spans.rs7
-rw-r--r--compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs2
-rw-r--r--compiler/rustc_mir_transform/src/coverage/tests.rs19
-rw-r--r--compiler/rustc_mir_transform/src/dataflow_const_prop.rs68
-rw-r--r--compiler/rustc_mir_transform/src/dead_store_elimination.rs40
-rw-r--r--compiler/rustc_mir_transform/src/deduce_param_attrs.rs2
-rw-r--r--compiler/rustc_mir_transform/src/dest_prop.rs10
-rw-r--r--compiler/rustc_mir_transform/src/early_otherwise_branch.rs47
-rw-r--r--compiler/rustc_mir_transform/src/elaborate_box_derefs.rs9
-rw-r--r--compiler/rustc_mir_transform/src/elaborate_drops.rs89
-rw-r--r--compiler/rustc_mir_transform/src/ffi_unwind_calls.rs2
-rw-r--r--compiler/rustc_mir_transform/src/function_item_references.rs43
-rw-r--r--compiler/rustc_mir_transform/src/generator.rs75
-rw-r--r--compiler/rustc_mir_transform/src/inline.rs172
-rw-r--r--compiler/rustc_mir_transform/src/inline/cycle.rs16
-rw-r--r--compiler/rustc_mir_transform/src/instsimplify.rs48
-rw-r--r--compiler/rustc_mir_transform/src/large_enums.rs2
-rw-r--r--compiler/rustc_mir_transform/src/lib.rs52
-rw-r--r--compiler/rustc_mir_transform/src/lower_intrinsics.rs19
-rw-r--r--compiler/rustc_mir_transform/src/match_branches.rs2
-rw-r--r--compiler/rustc_mir_transform/src/multiple_return_terminators.rs2
-rw-r--r--compiler/rustc_mir_transform/src/nrvo.rs2
-rw-r--r--compiler/rustc_mir_transform/src/pass_manager.rs4
-rw-r--r--compiler/rustc_mir_transform/src/ref_prop.rs50
-rw-r--r--compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs16
-rw-r--r--compiler/rustc_mir_transform/src/remove_uninit_drops.rs28
-rw-r--r--compiler/rustc_mir_transform/src/remove_unneeded_drops.rs2
-rw-r--r--compiler/rustc_mir_transform/src/remove_zsts.rs4
-rw-r--r--compiler/rustc_mir_transform/src/shim.rs65
-rw-r--r--compiler/rustc_mir_transform/src/simplify.rs3
-rw-r--r--compiler/rustc_mir_transform/src/sroa.rs10
-rw-r--r--compiler/rustc_mir_transform/src/ssa.rs8
-rw-r--r--compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs3
-rw-r--r--compiler/rustc_monomorphize/src/collector.rs49
-rw-r--r--compiler/rustc_monomorphize/src/partitioning.rs228
-rw-r--r--compiler/rustc_monomorphize/src/polymorphize.rs28
-rw-r--r--compiler/rustc_monomorphize/src/util.rs4
-rw-r--r--compiler/rustc_parse/messages.ftl26
-rw-r--r--compiler/rustc_parse/src/errors.rs85
-rw-r--r--compiler/rustc_parse/src/lexer/diagnostics.rs2
-rw-r--r--compiler/rustc_parse/src/lexer/mod.rs42
-rw-r--r--compiler/rustc_parse/src/lexer/tokentrees.rs2
-rw-r--r--compiler/rustc_parse/src/lexer/unescape_error_reporting.rs39
-rw-r--r--compiler/rustc_parse/src/lexer/unicode_chars.rs2
-rw-r--r--compiler/rustc_parse/src/lib.rs7
-rw-r--r--compiler/rustc_parse/src/parser/attr.rs6
-rw-r--r--compiler/rustc_parse/src/parser/attr_wrapper.rs41
-rw-r--r--compiler/rustc_parse/src/parser/diagnostics.rs124
-rw-r--r--compiler/rustc_parse/src/parser/expr.rs94
-rw-r--r--compiler/rustc_parse/src/parser/generics.rs2
-rw-r--r--compiler/rustc_parse/src/parser/item.rs193
-rw-r--r--compiler/rustc_parse/src/parser/mod.rs240
-rw-r--r--compiler/rustc_parse/src/parser/nonterminal.rs89
-rw-r--r--compiler/rustc_parse/src/parser/pat.rs132
-rw-r--r--compiler/rustc_parse/src/parser/path.rs2
-rw-r--r--compiler/rustc_parse/src/parser/stmt.rs12
-rw-r--r--compiler/rustc_parse/src/parser/ty.rs26
-rw-r--r--compiler/rustc_parse/src/validate_attr.rs25
-rw-r--r--compiler/rustc_parse_format/src/lib.rs103
-rw-r--r--compiler/rustc_parse_format/src/tests.rs15
-rw-r--r--compiler/rustc_passes/messages.ftl24
-rw-r--r--compiler/rustc_passes/src/check_attr.rs129
-rw-r--r--compiler/rustc_passes/src/check_const.rs12
-rw-r--r--compiler/rustc_passes/src/dead.rs144
-rw-r--r--compiler/rustc_passes/src/entry.rs8
-rw-r--r--compiler/rustc_passes/src/errors.rs55
-rw-r--r--compiler/rustc_passes/src/hir_id_validator.rs5
-rw-r--r--compiler/rustc_passes/src/hir_stats.rs8
-rw-r--r--compiler/rustc_passes/src/layout_test.rs5
-rw-r--r--compiler/rustc_passes/src/liveness.rs15
-rw-r--r--compiler/rustc_passes/src/loops.rs4
-rw-r--r--compiler/rustc_passes/src/naked_functions.rs4
-rw-r--r--compiler/rustc_passes/src/reachable.rs20
-rw-r--r--compiler/rustc_passes/src/stability.rs20
-rw-r--r--compiler/rustc_passes/src/weak_lang_items.rs2
-rw-r--r--compiler/rustc_privacy/src/lib.rs145
-rw-r--r--compiler/rustc_query_impl/src/lib.rs1
-rw-r--r--compiler/rustc_query_impl/src/plumbing.rs2
-rw-r--r--compiler/rustc_query_system/Cargo.toml2
-rw-r--r--compiler/rustc_query_system/messages.ftl1
-rw-r--r--compiler/rustc_query_system/src/dep_graph/graph.rs17
-rw-r--r--compiler/rustc_query_system/src/dep_graph/mod.rs2
-rw-r--r--compiler/rustc_query_system/src/error.rs2
-rw-r--r--compiler/rustc_query_system/src/query/caches.rs56
-rw-r--r--compiler/rustc_query_system/src/query/job.rs74
-rw-r--r--compiler/rustc_resolve/src/build_reduced_graph.rs18
-rw-r--r--compiler/rustc_resolve/src/check_unused.rs4
-rw-r--r--compiler/rustc_resolve/src/diagnostics.rs147
-rw-r--r--compiler/rustc_resolve/src/effective_visibilities.rs9
-rw-r--r--compiler/rustc_resolve/src/ident.rs12
-rw-r--r--compiler/rustc_resolve/src/imports.rs197
-rw-r--r--compiler/rustc_resolve/src/late.rs296
-rw-r--r--compiler/rustc_resolve/src/late/diagnostics.rs257
-rw-r--r--compiler/rustc_resolve/src/lib.rs38
-rw-r--r--compiler/rustc_resolve/src/macros.rs34
-rw-r--r--compiler/rustc_resolve/src/rustdoc.rs75
-rw-r--r--compiler/rustc_session/Cargo.toml1
-rw-r--r--compiler/rustc_session/messages.ftl7
-rw-r--r--compiler/rustc_session/src/code_stats.rs6
-rw-r--r--compiler/rustc_session/src/config.rs150
-rw-r--r--compiler/rustc_session/src/cstore.rs2
-rw-r--r--compiler/rustc_session/src/errors.rs19
-rw-r--r--compiler/rustc_session/src/lib.rs1
-rw-r--r--compiler/rustc_session/src/options.rs16
-rw-r--r--compiler/rustc_session/src/output.rs13
-rw-r--r--compiler/rustc_session/src/parse.rs35
-rw-r--r--compiler/rustc_session/src/session.rs154
-rw-r--r--compiler/rustc_session/src/utils.rs1
-rw-r--r--compiler/rustc_smir/Cargo.toml6
-rw-r--r--compiler/rustc_smir/rust-toolchain.toml2
-rw-r--r--compiler/rustc_smir/src/lib.rs15
-rw-r--r--compiler/rustc_smir/src/rustc_internal/mod.rs110
-rw-r--r--compiler/rustc_smir/src/rustc_smir/mod.rs1120
-rw-r--r--compiler/rustc_smir/src/stable_mir/mir/body.rs322
-rw-r--r--compiler/rustc_smir/src/stable_mir/mod.rs28
-rw-r--r--compiler/rustc_smir/src/stable_mir/ty.rs414
-rw-r--r--compiler/rustc_span/src/def_id.rs95
-rw-r--r--compiler/rustc_span/src/edit_distance.rs7
-rw-r--r--compiler/rustc_span/src/edition.rs6
-rw-r--r--compiler/rustc_span/src/lib.rs59
-rw-r--r--compiler/rustc_span/src/source_map.rs15
-rw-r--r--compiler/rustc_span/src/symbol.rs30
-rw-r--r--compiler/rustc_symbol_mangling/src/legacy.rs20
-rw-r--r--compiler/rustc_symbol_mangling/src/lib.rs16
-rw-r--r--compiler/rustc_symbol_mangling/src/test.rs4
-rw-r--r--compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs163
-rw-r--r--compiler/rustc_symbol_mangling/src/v0.rs51
-rw-r--r--compiler/rustc_target/Cargo.toml2
-rw-r--r--compiler/rustc_target/src/abi/call/aarch64.rs70
-rw-r--r--compiler/rustc_target/src/abi/call/csky.rs31
-rw-r--r--compiler/rustc_target/src/abi/call/m68k.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/mod.rs60
-rw-r--r--compiler/rustc_target/src/abi/call/wasm.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/x86.rs73
-rw-r--r--compiler/rustc_target/src/abi/call/x86_64.rs2
-rw-r--r--compiler/rustc_target/src/abi/mod.rs28
-rw-r--r--compiler/rustc_target/src/asm/csky.rs128
-rw-r--r--compiler/rustc_target/src/asm/mod.rs28
-rw-r--r--compiler/rustc_target/src/json.rs5
-rw-r--r--compiler/rustc_target/src/lib.rs2
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs14
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_linux_ohos.rs6
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_teeos.rs16
-rw-r--r--compiler/rustc_target/src/spec/abi.rs41
-rw-r--r--compiler/rustc_target/src/spec/abi/tests.rs6
-rw-r--r--compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs1
-rw-r--r--compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs1
-rw-r--r--compiler/rustc_target/src/spec/armeb_unknown_linux_gnueabi.rs1
-rw-r--r--compiler/rustc_target/src/spec/armebv7r_none_eabi.rs4
-rw-r--r--compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs4
-rw-r--r--compiler/rustc_target/src/spec/armv4t_none_eabi.rs4
-rw-r--r--compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs1
-rw-r--r--compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs1
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs1
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_linux_ohos.rs6
-rw-r--r--compiler/rustc_target/src/spec/armv7a_none_eabihf.rs2
-rw-r--r--compiler/rustc_target/src/spec/armv7r_none_eabi.rs4
-rw-r--r--compiler/rustc_target/src/spec/armv7r_none_eabihf.rs4
-rw-r--r--compiler/rustc_target/src/spec/avr_gnu_base.rs2
-rw-r--r--compiler/rustc_target/src/spec/csky_unknown_linux_gnuabiv2.rs20
-rw-r--r--compiler/rustc_target/src/spec/hermit_base.rs14
-rw-r--r--compiler/rustc_target/src/spec/linux_ohos_base.rs12
-rw-r--r--compiler/rustc_target/src/spec/loongarch64_unknown_none.rs3
-rw-r--r--compiler/rustc_target/src/spec/loongarch64_unknown_none_softfloat.rs3
-rw-r--r--compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs2
-rw-r--r--compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs2
-rw-r--r--compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs2
-rw-r--r--compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs2
-rw-r--r--compiler/rustc_target/src/spec/mod.rs75
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_ibm_aix.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_openbsd.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs2
-rw-r--r--compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs2
-rw-r--r--compiler/rustc_target/src/spec/riscv64_linux_android.rs19
-rw-r--r--compiler/rustc_target/src/spec/riscv64gc_unknown_hermit.rs20
-rw-r--r--compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs2
-rw-r--r--compiler/rustc_target/src/spec/sparc_unknown_none_elf.rs27
-rw-r--r--compiler/rustc_target/src/spec/teeos_base.rs29
-rw-r--r--compiler/rustc_target/src/spec/thumb_base.rs2
-rw-r--r--compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs2
-rw-r--r--compiler/rustc_target/src/spec/unikraft_linux_musl_base.rs15
-rw-r--r--compiler/rustc_target/src/spec/wasm32_wasi_preview1_threads.rs134
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unikraft_linux_musl.rs19
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs20
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_linux_ohos.rs26
-rw-r--r--compiler/rustc_trait_selection/messages.ftl19
-rw-r--r--compiler/rustc_trait_selection/src/errors.rs72
-rw-r--r--compiler/rustc_trait_selection/src/infer.rs2
-rw-r--r--compiler/rustc_trait_selection/src/solve/alias_relate.rs48
-rw-r--r--compiler/rustc_trait_selection/src/solve/assembly/mod.rs563
-rw-r--r--compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs90
-rw-r--r--compiler/rustc_trait_selection/src/solve/canonicalize.rs119
-rw-r--r--compiler/rustc_trait_selection/src/solve/eval_ctxt.rs410
-rw-r--r--compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs202
-rw-r--r--compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs1
-rw-r--r--compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs319
-rw-r--r--compiler/rustc_trait_selection/src/solve/fulfill.rs4
-rw-r--r--compiler/rustc_trait_selection/src/solve/inherent_projection.rs50
-rw-r--r--compiler/rustc_trait_selection/src/solve/inspect.rs37
-rw-r--r--compiler/rustc_trait_selection/src/solve/mod.rs82
-rw-r--r--compiler/rustc_trait_selection/src/solve/normalize.rs32
-rw-r--r--compiler/rustc_trait_selection/src/solve/opaques.rs15
-rw-r--r--compiler/rustc_trait_selection/src/solve/project_goals.rs252
-rw-r--r--compiler/rustc_trait_selection/src/solve/search_graph/cache.rs32
-rw-r--r--compiler/rustc_trait_selection/src/solve/search_graph/mod.rs445
-rw-r--r--compiler/rustc_trait_selection/src/solve/search_graph/overflow.rs120
-rw-r--r--compiler/rustc_trait_selection/src/solve/trait_goals.rs635
-rw-r--r--compiler/rustc_trait_selection/src/solve/weak_types.rs17
-rw-r--r--compiler/rustc_trait_selection/src/traits/auto_trait.rs41
-rw-r--r--compiler/rustc_trait_selection/src/traits/coherence.rs240
-rw-r--r--compiler/rustc_trait_selection/src/traits/const_evaluatable.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/engine.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/ambiguity.rs6
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs250
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs62
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs528
-rw-r--r--compiler/rustc_trait_selection/src/traits/fulfill.rs73
-rw-r--r--compiler/rustc_trait_selection/src/traits/misc.rs20
-rw-r--r--compiler/rustc_trait_selection/src/traits/mod.rs64
-rw-r--r--compiler/rustc_trait_selection/src/traits/object_safety.rs35
-rw-r--r--compiler/rustc_trait_selection/src/traits/outlives_bounds.rs3
-rw-r--r--compiler/rustc_trait_selection/src/traits/project.rs319
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs33
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs13
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/normalize.rs53
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs23
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs5
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs10
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs134
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/confirmation.rs452
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/mod.rs360
-rw-r--r--compiler/rustc_trait_selection/src/traits/specialize/mod.rs90
-rw-r--r--compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs4
-rw-r--r--compiler/rustc_trait_selection/src/traits/structural_match.rs10
-rw-r--r--compiler/rustc_trait_selection/src/traits/structural_normalize.rs4
-rw-r--r--compiler/rustc_trait_selection/src/traits/util.rs48
-rw-r--r--compiler/rustc_trait_selection/src/traits/vtable.rs184
-rw-r--r--compiler/rustc_trait_selection/src/traits/wf.rs95
-rw-r--r--compiler/rustc_traits/src/dropck_outlives.rs10
-rw-r--r--compiler/rustc_traits/src/normalize_projection_ty.rs4
-rw-r--r--compiler/rustc_transmute/src/layout/tree.rs16
-rw-r--r--compiler/rustc_transmute/src/lib.rs13
-rw-r--r--compiler/rustc_ty_utils/src/abi.rs29
-rw-r--r--compiler/rustc_ty_utils/src/assoc.rs128
-rw-r--r--compiler/rustc_ty_utils/src/consts.rs15
-rw-r--r--compiler/rustc_ty_utils/src/implied_bounds.rs92
-rw-r--r--compiler/rustc_ty_utils/src/instance.rs145
-rw-r--r--compiler/rustc_ty_utils/src/layout.rs80
-rw-r--r--compiler/rustc_ty_utils/src/layout_sanity_check.rs4
-rw-r--r--compiler/rustc_ty_utils/src/lib.rs1
-rw-r--r--compiler/rustc_ty_utils/src/needs_drop.rs98
-rw-r--r--compiler/rustc_ty_utils/src/opaque_types.rs45
-rw-r--r--compiler/rustc_ty_utils/src/representability.rs16
-rw-r--r--compiler/rustc_ty_utils/src/ty.rs165
-rw-r--r--compiler/rustc_type_ir/src/lib.rs49
-rw-r--r--compiler/rustc_type_ir/src/structural_impls.rs162
-rw-r--r--compiler/rustc_type_ir/src/sty.rs168
964 files changed, 29835 insertions, 30215 deletions
diff --git a/compiler/rustc/build.rs b/compiler/rustc/build.rs
index 39cf3e094..8b7d28d2b 100644
--- a/compiler/rustc/build.rs
+++ b/compiler/rustc/build.rs
@@ -18,7 +18,7 @@ fn set_windows_exe_options() {
let mut manifest = env::current_dir().unwrap();
manifest.push(WINDOWS_MANIFEST_FILE);
- println!("cargo:rerun-if-changed={}", WINDOWS_MANIFEST_FILE);
+ println!("cargo:rerun-if-changed={WINDOWS_MANIFEST_FILE}");
// Embed the Windows application manifest file.
println!("cargo:rustc-link-arg-bin=rustc-main=/MANIFEST:EMBED");
println!("cargo:rustc-link-arg-bin=rustc-main=/MANIFESTINPUT:{}", manifest.to_str().unwrap());
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index f6875d895..a8a1a9057 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -40,6 +40,8 @@ pub trait LayoutCalculator {
largest_niche,
align,
size,
+ max_repr_align: None,
+ unadjusted_abi_align: align.abi,
}
}
@@ -122,6 +124,8 @@ pub trait LayoutCalculator {
largest_niche: None,
align: dl.i8_align,
size: Size::ZERO,
+ max_repr_align: None,
+ unadjusted_abi_align: dl.i8_align.abi,
}
}
@@ -256,8 +260,7 @@ pub trait LayoutCalculator {
}
_ => assert!(
start == Bound::Unbounded && end == Bound::Unbounded,
- "nonscalar layout for layout_scalar_valid_range type: {:#?}",
- st,
+ "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
),
}
@@ -289,6 +292,9 @@ pub trait LayoutCalculator {
}
let mut align = dl.aggregate_align;
+ let mut max_repr_align = repr.align;
+ let mut unadjusted_abi_align = align.abi;
+
let mut variant_layouts = variants
.iter_enumerated()
.map(|(j, v)| {
@@ -296,6 +302,8 @@ pub trait LayoutCalculator {
st.variants = Variants::Single { index: j };
align = align.max(st.align);
+ max_repr_align = max_repr_align.max(st.max_repr_align);
+ unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
Some(st)
})
@@ -422,6 +430,8 @@ pub trait LayoutCalculator {
largest_niche,
size,
align,
+ max_repr_align,
+ unadjusted_abi_align,
};
Some(TmpLayout { layout, variants: variant_layouts })
@@ -452,10 +462,13 @@ pub trait LayoutCalculator {
min = 0;
max = 0;
}
- assert!(min <= max, "discriminant range is {}...{}", min, max);
+ assert!(min <= max, "discriminant range is {min}...{max}");
let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);
let mut align = dl.aggregate_align;
+ let mut max_repr_align = repr.align;
+ let mut unadjusted_abi_align = align.abi;
+
let mut size = Size::ZERO;
// We're interested in the smallest alignment, so start large.
@@ -498,6 +511,8 @@ pub trait LayoutCalculator {
}
size = cmp::max(size, st.size);
align = align.max(st.align);
+ max_repr_align = max_repr_align.max(st.max_repr_align);
+ unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
Some(st)
})
.collect::<Option<IndexVec<VariantIdx, _>>>()?;
@@ -521,8 +536,7 @@ pub trait LayoutCalculator {
// space necessary to represent would have to be discarded (or layout is wrong
// on thinking it needs 16 bits)
panic!(
- "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
- min_ity, typeck_ity
+ "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
);
// However, it is fine to make discr type however large (as an optimisation)
// after this point – we’ll just truncate the value we load in codegen.
@@ -691,6 +705,8 @@ pub trait LayoutCalculator {
abi,
align,
size,
+ max_repr_align,
+ unadjusted_abi_align,
};
let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
@@ -730,10 +746,7 @@ pub trait LayoutCalculator {
let dl = self.current_data_layout();
let dl = dl.borrow();
let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
- if let Some(repr_align) = repr.align {
- align = align.max(AbiAndPrefAlign::new(repr_align));
- }
+ let mut max_repr_align = repr.align;
// If all the non-ZST fields have the same ABI and union ABI optimizations aren't
// disabled, we can use that common ABI for the union as a whole.
@@ -748,9 +761,12 @@ pub trait LayoutCalculator {
let mut size = Size::ZERO;
let only_variant = &variants[FIRST_VARIANT];
for field in only_variant {
- assert!(field.0.is_sized());
+ if field.0.is_unsized() {
+ self.delay_bug("unsized field in union".to_string());
+ }
align = align.max(field.align());
+ max_repr_align = max_repr_align.max(field.max_repr_align());
size = cmp::max(size, field.size());
if field.0.is_zst() {
@@ -787,6 +803,14 @@ pub trait LayoutCalculator {
if let Some(pack) = repr.pack {
align = align.min(AbiAndPrefAlign::new(pack));
}
+ // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
+ // See documentation on `LayoutS::unadjusted_abi_align`.
+ let unadjusted_abi_align = align.abi;
+ if let Some(repr_align) = repr.align {
+ align = align.max(AbiAndPrefAlign::new(repr_align));
+ }
+ // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
+ let align = align;
// If all non-ZST fields have the same ABI, we may forward that ABI
// for the union as a whole, unless otherwise inhibited.
@@ -809,6 +833,8 @@ pub trait LayoutCalculator {
largest_niche: None,
align,
size: size.align_to(align.abi),
+ max_repr_align,
+ unadjusted_abi_align,
})
}
}
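The two fields threaded through union layout in this hunk track alignment from different sources: `max_repr_align` records the largest explicit `repr(align)` request, while `unadjusted_abi_align` is the alignment the type would have with `repr(align)` ignored but `repr(packed)` still applied. A standalone sketch of how those two attributes pull alignment in opposite directions (type names invented for illustration; the natural alignment of `u64` is assumed to be 8, as on most 64-bit targets):

    use std::mem::align_of;

    #[repr(C)]
    struct Natural(u64);            // natural alignment of u64

    #[repr(C, packed(1))]
    struct Packed(u64);             // repr(packed) lowers the alignment to 1

    #[repr(C, align(16))]
    struct Raised(u64);             // repr(align) raises the alignment to 16

    fn main() {
        println!("natural: {}", align_of::<Natural>());
        println!("packed:  {}", align_of::<Packed>());
        println!("raised:  {}", align_of::<Raised>());
    }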
@@ -829,6 +855,7 @@ fn univariant(
) -> Option<LayoutS> {
let pack = repr.pack;
let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+ let mut max_repr_align = repr.align;
let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
let optimize = !repr.inhibit_struct_field_reordering_opt();
if optimize && fields.len() > 1 {
@@ -997,6 +1024,7 @@ fn univariant(
};
offset = offset.align_to(field_align.abi);
align = align.max(field_align);
+ max_repr_align = max_repr_align.max(field.max_repr_align());
debug!("univariant offset: {:?} field: {:#?}", offset, field);
offsets[i] = offset;
@@ -1018,9 +1046,16 @@ fn univariant(
offset = offset.checked_add(field.size(), dl)?;
}
+
+ // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
+ // See documentation on `LayoutS::unadjusted_abi_align`.
+ let unadjusted_abi_align = align.abi;
if let Some(repr_align) = repr.align {
align = align.max(AbiAndPrefAlign::new(repr_align));
}
+ // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
+ let align = align;
+
debug!("univariant min_size: {:?}", offset);
let min_size = offset;
// As stated above, inverse_memory_index holds field indices by increasing offset.
@@ -1036,6 +1071,7 @@ fn univariant(
inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
};
let size = min_size.align_to(align.abi);
+ let mut layout_of_single_non_zst_field = None;
let mut abi = Abi::Aggregate { sized };
// Unpack newtype ABIs and find scalar pairs.
if sized && size.bytes() > 0 {
@@ -1045,6 +1081,8 @@ fn univariant(
match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
// We have exactly one non-ZST field.
(Some((i, field)), None, None) => {
+ layout_of_single_non_zst_field = Some(field);
+
// Field fills the struct and it has a scalar or scalar pair ABI.
if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
{
@@ -1102,6 +1140,19 @@ fn univariant(
if fields.iter().any(|f| f.abi().is_uninhabited()) {
abi = Abi::Uninhabited;
}
+
+ let unadjusted_abi_align = if repr.transparent() {
+ match layout_of_single_non_zst_field {
+ Some(l) => l.unadjusted_abi_align(),
+ None => {
+ // `repr(transparent)` with all ZST fields.
+ align.abi
+ }
+ }
+ } else {
+ unadjusted_abi_align
+ };
+
Some(LayoutS {
variants: Variants::Single { index: FIRST_VARIANT },
fields: FieldsShape::Arbitrary { offsets, memory_index },
@@ -1109,6 +1160,8 @@ fn univariant(
largest_niche,
align,
size,
+ max_repr_align,
+ unadjusted_abi_align,
})
}
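The `layout_of_single_non_zst_field` bookkeeping added to `univariant` lets a `repr(transparent)` wrapper report the unadjusted ABI alignment of its one non-zero-sized field. A minimal sketch of the property being preserved (the wrapper type is invented for illustration):

    use std::marker::PhantomData;
    use std::mem::{align_of, size_of};

    #[allow(dead_code)]
    #[repr(transparent)]
    struct Wrapper {
        value: u32,               // the single non-zero-sized field
        _marker: PhantomData<u8>, // zero-sized and 1-aligned, so it cannot affect layout
    }

    fn main() {
        // A transparent wrapper shares the size and alignment of its field.
        assert_eq!(size_of::<Wrapper>(), size_of::<u32>());
        assert_eq!(align_of::<Wrapper>(), align_of::<u32>());
    }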
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index e1b9987f5..12dd1542d 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1,4 +1,5 @@
#![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]
+#![cfg_attr(all(not(bootstrap), feature = "nightly"), allow(internal_features))]
use std::fmt;
#[cfg(feature = "nightly")]
@@ -332,7 +333,7 @@ impl TargetDataLayout {
16 => 1 << 15,
32 => 1 << 31,
64 => 1 << 47,
- bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
+ bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
}
}
@@ -342,7 +343,7 @@ impl TargetDataLayout {
16 => I16,
32 => I32,
64 => I64,
- bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
+ bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
}
}
@@ -399,7 +400,7 @@ impl FromStr for Endian {
match s {
"little" => Ok(Self::Little),
"big" => Ok(Self::Big),
- _ => Err(format!(r#"unknown endian: "{}""#, s)),
+ _ => Err(format!(r#"unknown endian: "{s}""#)),
}
}
}
@@ -456,7 +457,7 @@ impl Size {
pub fn bits(self) -> u64 {
#[cold]
fn overflow(bytes: u64) -> ! {
- panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
+ panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
}
self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
@@ -1179,17 +1180,12 @@ impl FieldsShape {
unreachable!("FieldsShape::offset: `Primitive`s have no fields")
}
FieldsShape::Union(count) => {
- assert!(
- i < count.get(),
- "tried to access field {} of union with {} fields",
- i,
- count
- );
+ assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
Size::ZERO
}
FieldsShape::Array { stride, count } => {
let i = u64::try_from(i).unwrap();
- assert!(i < count);
+ assert!(i < count, "tried to access field {i} of array with {count} fields");
stride * i
}
FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::from_usize(i)],
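The `Array` arm now carries the same style of assertion message as the `Union` arm; the offset itself is simply the stride times the index, as in this toy standalone version (constants chosen arbitrarily):

    fn array_field_offset(stride: u64, count: u64, i: u64) -> u64 {
        assert!(i < count, "tried to access field {i} of array with {count} fields");
        stride * i
    }

    fn main() {
        // With a 4-byte stride, element 2 begins 8 bytes into the array.
        assert_eq!(array_field_offset(4, 10, 2), 8);
    }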
@@ -1294,7 +1290,7 @@ impl Abi {
Primitive::Int(_, signed) => signed,
_ => false,
},
- _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
+ _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
}
}
@@ -1345,7 +1341,6 @@ impl Abi {
/// Discard validity range information and allow undef.
pub fn to_union(&self) -> Self {
- assert!(self.is_sized());
match *self {
Abi::Scalar(s) => Abi::Scalar(s.to_union()),
Abi::ScalarPair(s1, s2) => Abi::ScalarPair(s1.to_union(), s2.to_union()),
@@ -1531,6 +1526,16 @@ pub struct LayoutS {
pub align: AbiAndPrefAlign,
pub size: Size,
+
+ /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
+ /// Only used on i686-windows, where the argument passing ABI is different when alignment is
+ /// requested, even if the requested alignment is equal to the natural alignment.
+ pub max_repr_align: Option<Align>,
+
+ /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
+ /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
+ /// in some cases.
+ pub unadjusted_abi_align: Align,
}
impl LayoutS {
@@ -1545,6 +1550,8 @@ impl LayoutS {
largest_niche,
size,
align,
+ max_repr_align: None,
+ unadjusted_abi_align: align.abi,
}
}
}
@@ -1554,7 +1561,16 @@ impl fmt::Debug for LayoutS {
// This is how `Layout` used to print before it become
// `Interned<LayoutS>`. We print it like this to avoid having to update
// expected output in a lot of tests.
- let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
+ let LayoutS {
+ size,
+ align,
+ abi,
+ fields,
+ largest_niche,
+ variants,
+ max_repr_align,
+ unadjusted_abi_align,
+ } = self;
f.debug_struct("Layout")
.field("size", size)
.field("align", align)
@@ -1562,6 +1578,8 @@ impl fmt::Debug for LayoutS {
.field("fields", fields)
.field("largest_niche", largest_niche)
.field("variants", variants)
+ .field("max_repr_align", max_repr_align)
+ .field("unadjusted_abi_align", unadjusted_abi_align)
.finish()
}
}
@@ -1602,6 +1620,14 @@ impl<'a> Layout<'a> {
self.0.0.size
}
+ pub fn max_repr_align(self) -> Option<Align> {
+ self.0.0.max_repr_align
+ }
+
+ pub fn unadjusted_abi_align(self) -> Align {
+ self.0.0.unadjusted_abi_align
+ }
+
/// Whether the layout is from a type that implements [`std::marker::PointerLike`].
///
/// Currently, that means that the type is pointer-sized, pointer-aligned,
diff --git a/compiler/rustc_apfloat/Cargo.toml b/compiler/rustc_apfloat/Cargo.toml
deleted file mode 100644
index 98305201b..000000000
--- a/compiler/rustc_apfloat/Cargo.toml
+++ /dev/null
@@ -1,8 +0,0 @@
-[package]
-name = "rustc_apfloat"
-version = "0.0.0"
-edition = "2021"
-
-[dependencies]
-bitflags = "1.2.1"
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_apfloat/src/ieee.rs b/compiler/rustc_apfloat/src/ieee.rs
deleted file mode 100644
index 2286712f0..000000000
--- a/compiler/rustc_apfloat/src/ieee.rs
+++ /dev/null
@@ -1,2757 +0,0 @@
-use crate::{Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO};
-use crate::{Float, FloatConvert, ParseError, Round, Status, StatusAnd};
-
-use core::cmp::{self, Ordering};
-use core::fmt::{self, Write};
-use core::marker::PhantomData;
-use core::mem;
-use core::ops::Neg;
-use smallvec::{smallvec, SmallVec};
-
-#[must_use]
-pub struct IeeeFloat<S> {
- /// Absolute significand value (including the integer bit).
- sig: [Limb; 1],
-
- /// The signed unbiased exponent of the value.
- exp: ExpInt,
-
- /// What kind of floating point number this is.
- category: Category,
-
- /// Sign bit of the number.
- sign: bool,
-
- marker: PhantomData<S>,
-}
-
-/// Fundamental unit of big integer arithmetic, but also
-/// large to store the largest significands by itself.
-type Limb = u128;
-const LIMB_BITS: usize = 128;
-fn limbs_for_bits(bits: usize) -> usize {
- (bits + LIMB_BITS - 1) / LIMB_BITS
-}
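The helper just above is a plain ceiling division from a bit count to a limb count; restated as a standalone sketch with a few worked values (using the same 128-bit limb width as the deleted source):

    const LIMB_BITS: usize = 128;

    fn limbs_for_bits(bits: usize) -> usize {
        // Ceiling division: a partial limb still occupies a whole limb.
        (bits + LIMB_BITS - 1) / LIMB_BITS
    }

    fn main() {
        assert_eq!(limbs_for_bits(64), 1);  // fits in one limb
        assert_eq!(limbs_for_bits(128), 1); // exactly one limb
        assert_eq!(limbs_for_bits(129), 2); // spills into a second limb
    }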
-
-/// Enum that represents what fraction of the LSB truncated bits of an fp number
-/// represent.
-///
-/// This essentially combines the roles of guard and sticky bits.
-#[must_use]
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-enum Loss {
- // Example of truncated bits:
- ExactlyZero, // 000000
- LessThanHalf, // 0xxxxx x's not all zero
- ExactlyHalf, // 100000
- MoreThanHalf, // 1xxxxx x's not all zero
-}
-
-/// Represents floating point arithmetic semantics.
-pub trait Semantics: Sized {
- /// Total number of bits in the in-memory format.
- const BITS: usize;
-
- /// Number of bits in the significand. This includes the integer bit.
- const PRECISION: usize;
-
- /// The largest E such that 2<sup>E</sup> is representable; this matches the
- /// definition of IEEE 754.
- const MAX_EXP: ExpInt;
-
- /// The smallest E such that 2<sup>E</sup> is a normalized number; this
- /// matches the definition of IEEE 754.
- const MIN_EXP: ExpInt = -Self::MAX_EXP + 1;
-
- /// The significand bit that marks NaN as quiet.
- const QNAN_BIT: usize = Self::PRECISION - 2;
-
- /// The significand bitpattern to mark a NaN as quiet.
-    /// NOTE: for X87DoubleExtended we need to set two bits instead of 1.
- const QNAN_SIGNIFICAND: Limb = 1 << Self::QNAN_BIT;
-
- fn from_bits(bits: u128) -> IeeeFloat<Self> {
- assert!(Self::BITS > Self::PRECISION);
-
- let sign = bits & (1 << (Self::BITS - 1));
- let exponent = (bits & !sign) >> (Self::PRECISION - 1);
- let mut r = IeeeFloat {
- sig: [bits & ((1 << (Self::PRECISION - 1)) - 1)],
- // Convert the exponent from its bias representation to a signed integer.
- exp: (exponent as ExpInt) - Self::MAX_EXP,
- category: Category::Zero,
- sign: sign != 0,
- marker: PhantomData,
- };
-
- if r.exp == Self::MIN_EXP - 1 && r.sig == [0] {
- // Exponent, significand meaningless.
- r.category = Category::Zero;
- } else if r.exp == Self::MAX_EXP + 1 && r.sig == [0] {
- // Exponent, significand meaningless.
- r.category = Category::Infinity;
- } else if r.exp == Self::MAX_EXP + 1 && r.sig != [0] {
- // Sign, exponent, significand meaningless.
- r.category = Category::NaN;
- } else {
- r.category = Category::Normal;
- if r.exp == Self::MIN_EXP - 1 {
- // Denormal.
- r.exp = Self::MIN_EXP;
- } else {
- // Set integer bit.
- sig::set_bit(&mut r.sig, Self::PRECISION - 1);
- }
- }
-
- r
- }
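    // Illustration (not part of the deleted file): for the `Single` semantics
    // defined below (BITS = 32, PRECISION = 24, MAX_EXP = 127), the f32 value 1.0
    // has bit pattern 0x3F80_0000: sign 0, biased exponent field 0x7F = 127,
    // stored significand 0. Unbiasing gives exp = 127 - 127 = 0, the category is
    // Normal, and the implicit integer bit is set, so the significand becomes 1 << 23.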
-
- fn to_bits(x: IeeeFloat<Self>) -> u128 {
- assert!(Self::BITS > Self::PRECISION);
-
- // Split integer bit from significand.
- let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1);
- let mut significand = x.sig[0] & ((1 << (Self::PRECISION - 1)) - 1);
- let exponent = match x.category {
- Category::Normal => {
- if x.exp == Self::MIN_EXP && !integer_bit {
- // Denormal.
- Self::MIN_EXP - 1
- } else {
- x.exp
- }
- }
- Category::Zero => {
- // FIXME(eddyb) Maybe we should guarantee an invariant instead?
- significand = 0;
- Self::MIN_EXP - 1
- }
- Category::Infinity => {
- // FIXME(eddyb) Maybe we should guarantee an invariant instead?
- significand = 0;
- Self::MAX_EXP + 1
- }
- Category::NaN => Self::MAX_EXP + 1,
- };
-
- // Convert the exponent from a signed integer to its bias representation.
- let exponent = (exponent + Self::MAX_EXP) as u128;
-
- ((x.sign as u128) << (Self::BITS - 1)) | (exponent << (Self::PRECISION - 1)) | significand
- }
-}
-
-impl<S> Copy for IeeeFloat<S> {}
-impl<S> Clone for IeeeFloat<S> {
- fn clone(&self) -> Self {
- *self
- }
-}
-
-macro_rules! ieee_semantics {
- ($($name:ident = $sem:ident($bits:tt : $exp_bits:tt)),*) => {
- $(pub struct $sem;)*
- $(pub type $name = IeeeFloat<$sem>;)*
- $(impl Semantics for $sem {
- const BITS: usize = $bits;
- const PRECISION: usize = ($bits - 1 - $exp_bits) + 1;
- const MAX_EXP: ExpInt = (1 << ($exp_bits - 1)) - 1;
- })*
- }
-}
-
-ieee_semantics! {
- Half = HalfS(16:5),
- Single = SingleS(32:8),
- Double = DoubleS(64:11),
- Quad = QuadS(128:15)
-}
-
-pub struct X87DoubleExtendedS;
-pub type X87DoubleExtended = IeeeFloat<X87DoubleExtendedS>;
-impl Semantics for X87DoubleExtendedS {
- const BITS: usize = 80;
- const PRECISION: usize = 64;
- const MAX_EXP: ExpInt = (1 << (15 - 1)) - 1;
-
- /// For x87 extended precision, we want to make a NaN, not a
- /// pseudo-NaN. Maybe we should expose the ability to make
- /// pseudo-NaNs?
- const QNAN_SIGNIFICAND: Limb = 0b11 << Self::QNAN_BIT;
-
- /// Integer bit is explicit in this format. Intel hardware (387 and later)
- /// does not support these bit patterns:
- /// exponent = all 1's, integer bit 0, significand 0 ("pseudoinfinity")
- /// exponent = all 1's, integer bit 0, significand nonzero ("pseudoNaN")
- /// exponent = 0, integer bit 1 ("pseudodenormal")
- /// exponent != 0 nor all 1's, integer bit 0 ("unnormal")
- /// At the moment, the first two are treated as NaNs, the second two as Normal.
- fn from_bits(bits: u128) -> IeeeFloat<Self> {
- let sign = bits & (1 << (Self::BITS - 1));
- let exponent = (bits & !sign) >> Self::PRECISION;
- let mut r = IeeeFloat {
- sig: [bits & ((1 << (Self::PRECISION - 1)) - 1)],
- // Convert the exponent from its bias representation to a signed integer.
- exp: (exponent as ExpInt) - Self::MAX_EXP,
- category: Category::Zero,
- sign: sign != 0,
- marker: PhantomData,
- };
-
- if r.exp == Self::MIN_EXP - 1 && r.sig == [0] {
- // Exponent, significand meaningless.
- r.category = Category::Zero;
- } else if r.exp == Self::MAX_EXP + 1 && r.sig == [1 << (Self::PRECISION - 1)] {
- // Exponent, significand meaningless.
- r.category = Category::Infinity;
- } else if r.exp == Self::MAX_EXP + 1 && r.sig != [1 << (Self::PRECISION - 1)] {
- // Sign, exponent, significand meaningless.
- r.category = Category::NaN;
- } else {
- r.category = Category::Normal;
- if r.exp == Self::MIN_EXP - 1 {
- // Denormal.
- r.exp = Self::MIN_EXP;
- }
- }
-
- r
- }
-
- fn to_bits(x: IeeeFloat<Self>) -> u128 {
- // Get integer bit from significand.
- let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1);
- let mut significand = x.sig[0] & ((1 << Self::PRECISION) - 1);
- let exponent = match x.category {
- Category::Normal => {
- if x.exp == Self::MIN_EXP && !integer_bit {
- // Denormal.
- Self::MIN_EXP - 1
- } else {
- x.exp
- }
- }
- Category::Zero => {
- // FIXME(eddyb) Maybe we should guarantee an invariant instead?
- significand = 0;
- Self::MIN_EXP - 1
- }
- Category::Infinity => {
- // FIXME(eddyb) Maybe we should guarantee an invariant instead?
- significand = 1 << (Self::PRECISION - 1);
- Self::MAX_EXP + 1
- }
- Category::NaN => Self::MAX_EXP + 1,
- };
-
- // Convert the exponent from a signed integer to its bias representation.
- let exponent = (exponent + Self::MAX_EXP) as u128;
-
- ((x.sign as u128) << (Self::BITS - 1)) | (exponent << Self::PRECISION) | significand
- }
-}
-
-float_common_impls!(IeeeFloat<S>);
-
-impl<S: Semantics> PartialEq for IeeeFloat<S> {
- fn eq(&self, rhs: &Self) -> bool {
- self.partial_cmp(rhs) == Some(Ordering::Equal)
- }
-}
-
-impl<S: Semantics> PartialOrd for IeeeFloat<S> {
- fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
- match (self.category, rhs.category) {
- (Category::NaN, _) | (_, Category::NaN) => None,
-
- (Category::Infinity, Category::Infinity) => Some((!self.sign).cmp(&(!rhs.sign))),
-
- (Category::Zero, Category::Zero) => Some(Ordering::Equal),
-
- (Category::Infinity, _) | (Category::Normal, Category::Zero) => {
- Some((!self.sign).cmp(&self.sign))
- }
-
- (_, Category::Infinity) | (Category::Zero, Category::Normal) => {
- Some(rhs.sign.cmp(&(!rhs.sign)))
- }
-
- (Category::Normal, Category::Normal) => {
- // Two normal numbers. Do they have the same sign?
- Some((!self.sign).cmp(&(!rhs.sign)).then_with(|| {
- // Compare absolute values; invert result if negative.
- let result = self.cmp_abs_normal(*rhs);
-
- if self.sign { result.reverse() } else { result }
- }))
- }
- }
- }
-}
-
-impl<S> Neg for IeeeFloat<S> {
- type Output = Self;
- fn neg(mut self) -> Self {
- self.sign = !self.sign;
- self
- }
-}
-
-/// Prints this value as a decimal string.
-///
-/// \param precision The maximum number of digits of
-/// precision to output. If there are fewer digits available,
-/// zero padding will not be used unless the value is
-/// integral and small enough to be expressed in
-/// precision digits. 0 means to use the natural
-/// precision of the number.
-/// \param width The maximum number of zeros to
-/// consider inserting before falling back to scientific
-/// notation. 0 means to always use scientific notation.
-///
-/// \param alternate Indicate whether to remove the trailing zero in
-/// fraction part or not. Also setting this parameter to true forces
-/// producing of output more similar to default printf behavior.
-/// Specifically the lower e is used as exponent delimiter and exponent
-/// always contains no less than two digits.
-///
-/// Number precision width Result
-/// ------ --------- ----- ------
-/// 1.01E+4 5 2 10100
-/// 1.01E+4 4 2 1.01E+4
-/// 1.01E+4 5 1 1.01E+4
-/// 1.01E-2 5 2 0.0101
-/// 1.01E-2 4 2 0.0101
-/// 1.01E-2 4 1 1.01E-2
-impl<S: Semantics> fmt::Display for IeeeFloat<S> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let width = f.width().unwrap_or(3);
- let alternate = f.alternate();
-
- match self.category {
- Category::Infinity => {
- if self.sign {
- return f.write_str("-Inf");
- } else {
- return f.write_str("+Inf");
- }
- }
-
- Category::NaN => return f.write_str("NaN"),
-
- Category::Zero => {
- if self.sign {
- f.write_char('-')?;
- }
-
- if width == 0 {
- if alternate {
- f.write_str("0.0")?;
- if let Some(n) = f.precision() {
- for _ in 1..n {
- f.write_char('0')?;
- }
- }
- f.write_str("e+00")?;
- } else {
- f.write_str("0.0E+0")?;
- }
- } else {
- f.write_char('0')?;
- }
- return Ok(());
- }
-
- Category::Normal => {}
- }
-
- if self.sign {
- f.write_char('-')?;
- }
-
- // We use enough digits so the number can be round-tripped back to an
- // APFloat. The formula comes from "How to Print Floating-Point Numbers
- // Accurately" by Steele and White.
- // FIXME: Using a formula based purely on the precision is conservative;
- // we can print fewer digits depending on the actual value being printed.
-
- // precision = 2 + floor(S::PRECISION / lg_2(10))
- let precision = f.precision().unwrap_or(2 + S::PRECISION * 59 / 196);
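    // Illustration (not part of the deleted file): for the `Double` semantics,
    // S::PRECISION is 53, so the default works out to 2 + 53 * 59 / 196 = 2 + 15 = 17
    // decimal digits, the familiar round-trip digit count for binary64.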
-
- // Decompose the number into an APInt and an exponent.
- let mut exp = self.exp - (S::PRECISION as ExpInt - 1);
- let mut sig = vec![self.sig[0]];
-
- // Ignore trailing binary zeros.
- let trailing_zeros = sig[0].trailing_zeros();
- let _: Loss = sig::shift_right(&mut sig, &mut exp, trailing_zeros as usize);
-
- // Change the exponent from 2^e to 10^e.
- if exp == 0 {
- // Nothing to do.
- } else if exp > 0 {
- // Just shift left.
- let shift = exp as usize;
- sig.resize(limbs_for_bits(S::PRECISION + shift), 0);
- sig::shift_left(&mut sig, &mut exp, shift);
- } else {
- // exp < 0
- let mut texp = -exp as usize;
-
- // We transform this using the identity:
- // (N)(2^-e) == (N)(5^e)(10^-e)
-
- // Multiply significand by 5^e.
- // N * 5^0101 == N * 5^(1*1) * 5^(0*2) * 5^(1*4) * 5^(0*8)
- let mut sig_scratch = vec![];
- let mut p5 = vec![];
- let mut p5_scratch = vec![];
- while texp != 0 {
- if p5.is_empty() {
- p5.push(5);
- } else {
- p5_scratch.resize(p5.len() * 2, 0);
- let _: Loss =
- sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS);
- while p5_scratch.last() == Some(&0) {
- p5_scratch.pop();
- }
- mem::swap(&mut p5, &mut p5_scratch);
- }
- if texp & 1 != 0 {
- sig_scratch.resize(sig.len() + p5.len(), 0);
- let _: Loss = sig::mul(
- &mut sig_scratch,
- &mut 0,
- &sig,
- &p5,
- (sig.len() + p5.len()) * LIMB_BITS,
- );
- while sig_scratch.last() == Some(&0) {
- sig_scratch.pop();
- }
- mem::swap(&mut sig, &mut sig_scratch);
- }
- texp >>= 1;
- }
- }
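    // Illustration (not part of the deleted file): with N = 3 and e = 2, the
    // identity (N)(2^-e) == (N)(5^e)(10^-e) gives 3 * 2^-2 = (3 * 25) * 10^-2 = 0.75,
    // so scaling the significand by 5^e is all that is needed to trade the binary
    // exponent for a decimal one.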
-
- // Fill the buffer.
- let mut buffer = vec![];
-
- // Ignore digits from the significand until it is no more
- // precise than is required for the desired precision.
- // 196/59 is a very slight overestimate of lg_2(10).
- let required = (precision * 196 + 58) / 59;
- let mut discard_digits = sig::omsb(&sig).saturating_sub(required) * 59 / 196;
- let mut in_trail = true;
- while !sig.is_empty() {
- // Perform short division by 10 to extract the rightmost digit.
- // rem <- sig % 10
- // sig <- sig / 10
- let mut rem = 0;
-
- // Use 64-bit division and remainder, with 32-bit chunks from sig.
- sig::each_chunk(&mut sig, 32, |chunk| {
- let chunk = chunk as u32;
- let combined = ((rem as u64) << 32) | (chunk as u64);
- rem = (combined % 10) as u8;
- (combined / 10) as u32 as Limb
- });
-
- // Reduce the significand to avoid wasting time dividing 0's.
- while sig.last() == Some(&0) {
- sig.pop();
- }
-
- let digit = rem;
-
- // Ignore digits we don't need.
- if discard_digits > 0 {
- discard_digits -= 1;
- exp += 1;
- continue;
- }
-
- // Drop trailing zeros.
- if in_trail && digit == 0 {
- exp += 1;
- } else {
- in_trail = false;
- buffer.push(b'0' + digit);
- }
- }
-
- assert!(!buffer.is_empty(), "no characters in buffer!");
-
- // Drop down to precision.
- // FIXME: don't do more precise calculations above than are required.
- if buffer.len() > precision {
- // The most significant figures are the last ones in the buffer.
- let mut first_sig = buffer.len() - precision;
-
- // Round.
- // FIXME: this probably shouldn't use 'round half up'.
-
- // Rounding down is just a truncation, except we also want to drop
- // trailing zeros from the new result.
- if buffer[first_sig - 1] < b'5' {
- while first_sig < buffer.len() && buffer[first_sig] == b'0' {
- first_sig += 1;
- }
- } else {
- // Rounding up requires a decimal add-with-carry. If we continue
- // the carry, the newly-introduced zeros will just be truncated.
- for x in &mut buffer[first_sig..] {
- if *x == b'9' {
- first_sig += 1;
- } else {
- *x += 1;
- break;
- }
- }
- }
-
- exp += first_sig as ExpInt;
- buffer.drain(..first_sig);
-
- // If we carried through, we have exactly one digit of precision.
- if buffer.is_empty() {
- buffer.push(b'1');
- }
- }
-
- let digits = buffer.len();
-
- // Check whether we should use scientific notation.
- let scientific = if width == 0 {
- true
- } else if exp >= 0 {
- // 765e3 --> 765000
- // ^^^
- // But we shouldn't make the number look more precise than it is.
- exp as usize > width || digits + exp as usize > precision
- } else {
- // Power of the most significant digit.
- let msd = exp + (digits - 1) as ExpInt;
- if msd >= 0 {
- // 765e-2 == 7.65
- false
- } else {
- // 765e-5 == 0.00765
- // ^ ^^
- -msd as usize > width
- }
- };
-
- // Scientific formatting is pretty straightforward.
- if scientific {
- exp += digits as ExpInt - 1;
-
- f.write_char(buffer[digits - 1] as char)?;
- f.write_char('.')?;
- let truncate_zero = !alternate;
- if digits == 1 && truncate_zero {
- f.write_char('0')?;
- } else {
- for &d in buffer[..digits - 1].iter().rev() {
- f.write_char(d as char)?;
- }
- }
- // Fill with zeros up to precision.
- if !truncate_zero && precision > digits - 1 {
- for _ in 0..=precision - digits {
- f.write_char('0')?;
- }
- }
- // For alternate we use lower 'e'.
- f.write_char(if alternate { 'e' } else { 'E' })?;
-
- // Exponent always at least two digits if we do not truncate zeros.
- if truncate_zero {
- write!(f, "{:+}", exp)?;
- } else {
- write!(f, "{:+03}", exp)?;
- }
-
- return Ok(());
- }
-
- // Non-scientific, positive exponents.
- if exp >= 0 {
- for &d in buffer.iter().rev() {
- f.write_char(d as char)?;
- }
- for _ in 0..exp {
- f.write_char('0')?;
- }
- return Ok(());
- }
-
- // Non-scientific, negative exponents.
- let unit_place = -exp as usize;
- if unit_place < digits {
- for &d in buffer[unit_place..].iter().rev() {
- f.write_char(d as char)?;
- }
- f.write_char('.')?;
- for &d in buffer[..unit_place].iter().rev() {
- f.write_char(d as char)?;
- }
- } else {
- f.write_str("0.")?;
- for _ in digits..unit_place {
- f.write_char('0')?;
- }
- for &d in buffer.iter().rev() {
- f.write_char(d as char)?;
- }
- }
-
- Ok(())
- }
-}
-
-impl<S: Semantics> fmt::Debug for IeeeFloat<S> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(
- f,
- "{}({:?} | {}{:?} * 2^{})",
- self,
- self.category,
- if self.sign { "-" } else { "+" },
- self.sig,
- self.exp
- )
- }
-}
-
-impl<S: Semantics> Float for IeeeFloat<S> {
- const BITS: usize = S::BITS;
- const PRECISION: usize = S::PRECISION;
- const MAX_EXP: ExpInt = S::MAX_EXP;
- const MIN_EXP: ExpInt = S::MIN_EXP;
-
- const ZERO: Self = IeeeFloat {
- sig: [0],
- exp: S::MIN_EXP - 1,
- category: Category::Zero,
- sign: false,
- marker: PhantomData,
- };
-
- const INFINITY: Self = IeeeFloat {
- sig: [0],
- exp: S::MAX_EXP + 1,
- category: Category::Infinity,
- sign: false,
- marker: PhantomData,
- };
-
- // FIXME(eddyb) remove when qnan becomes const fn.
- const NAN: Self = IeeeFloat {
- sig: [S::QNAN_SIGNIFICAND],
- exp: S::MAX_EXP + 1,
- category: Category::NaN,
- sign: false,
- marker: PhantomData,
- };
-
- fn qnan(payload: Option<u128>) -> Self {
- IeeeFloat {
- sig: [S::QNAN_SIGNIFICAND
- | payload.map_or(0, |payload| {
- // Zero out the excess bits of the significand.
- payload & ((1 << S::QNAN_BIT) - 1)
- })],
- exp: S::MAX_EXP + 1,
- category: Category::NaN,
- sign: false,
- marker: PhantomData,
- }
- }
-
- fn snan(payload: Option<u128>) -> Self {
- let mut snan = Self::qnan(payload);
-
- // We always have to clear the QNaN bit to make it an SNaN.
- sig::clear_bit(&mut snan.sig, S::QNAN_BIT);
-
- // If there are no bits set in the payload, we have to set
- // *something* to make it a NaN instead of an infinity;
- // conventionally, this is the next bit down from the QNaN bit.
- if snan.sig[0] & !S::QNAN_SIGNIFICAND == 0 {
- sig::set_bit(&mut snan.sig, S::QNAN_BIT - 1);
- }
-
- snan
- }
-
- fn largest() -> Self {
- // We want (in interchange format):
- // exponent = 1..10
- // significand = 1..1
- IeeeFloat {
- sig: [(1 << S::PRECISION) - 1],
- exp: S::MAX_EXP,
- category: Category::Normal,
- sign: false,
- marker: PhantomData,
- }
- }
-
- // We want (in interchange format):
- // exponent = 0..0
- // significand = 0..01
- const SMALLEST: Self = IeeeFloat {
- sig: [1],
- exp: S::MIN_EXP,
- category: Category::Normal,
- sign: false,
- marker: PhantomData,
- };
-
- fn smallest_normalized() -> Self {
- // We want (in interchange format):
- // exponent = 0..0
- // significand = 10..0
- IeeeFloat {
- sig: [1 << (S::PRECISION - 1)],
- exp: S::MIN_EXP,
- category: Category::Normal,
- sign: false,
- marker: PhantomData,
- }
- }
-
- fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
- let status = match (self.category, rhs.category) {
- (Category::Infinity, Category::Infinity) => {
- // Differently signed infinities can only be validly
- // subtracted.
- if self.sign != rhs.sign {
- self = Self::NAN;
- Status::INVALID_OP
- } else {
- Status::OK
- }
- }
-
- // Sign may depend on rounding mode; handled below.
- (_, Category::Zero) | (Category::NaN, _) | (Category::Infinity, Category::Normal) => {
- Status::OK
- }
-
- (Category::Zero, _) | (_, Category::NaN | Category::Infinity) => {
- self = rhs;
- Status::OK
- }
-
- // This return code means it was not a simple case.
- (Category::Normal, Category::Normal) => {
- let loss = sig::add_or_sub(
- &mut self.sig,
- &mut self.exp,
- &mut self.sign,
- &mut [rhs.sig[0]],
- rhs.exp,
- rhs.sign,
- );
- let status;
- self = unpack!(status=, self.normalize(round, loss));
-
- // Can only be zero if we lost no fraction.
- assert!(self.category != Category::Zero || loss == Loss::ExactlyZero);
-
- status
- }
- };
-
- // If two numbers add (exactly) to zero, IEEE 754 decrees it is a
- // positive zero unless rounding to minus infinity, except that
- // adding two like-signed zeroes gives that zero.
- if self.category == Category::Zero
- && (rhs.category != Category::Zero || self.sign != rhs.sign)
- {
- self.sign = round == Round::TowardNegative;
- }
-
- status.and(self)
- }
-
- fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
- self.sign ^= rhs.sign;
-
- match (self.category, rhs.category) {
- (Category::NaN, _) => {
- self.sign = false;
- Status::OK.and(self)
- }
-
- (_, Category::NaN) => {
- self.sign = false;
- self.category = Category::NaN;
- self.sig = rhs.sig;
- Status::OK.and(self)
- }
-
- (Category::Zero, Category::Infinity) | (Category::Infinity, Category::Zero) => {
- Status::INVALID_OP.and(Self::NAN)
- }
-
- (_, Category::Infinity) | (Category::Infinity, _) => {
- self.category = Category::Infinity;
- Status::OK.and(self)
- }
-
- (Category::Zero, _) | (_, Category::Zero) => {
- self.category = Category::Zero;
- Status::OK.and(self)
- }
-
- (Category::Normal, Category::Normal) => {
- self.exp += rhs.exp;
- let mut wide_sig = [0; 2];
- let loss =
- sig::mul(&mut wide_sig, &mut self.exp, &self.sig, &rhs.sig, S::PRECISION);
- self.sig = [wide_sig[0]];
- let mut status;
- self = unpack!(status=, self.normalize(round, loss));
- if loss != Loss::ExactlyZero {
- status |= Status::INEXACT;
- }
- status.and(self)
- }
- }
- }
-
- fn mul_add_r(mut self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self> {
- // If and only if all arguments are normal do we need to do an
- // extended-precision calculation.
- if !self.is_finite_non_zero() || !multiplicand.is_finite_non_zero() || !addend.is_finite() {
- let mut status;
- self = unpack!(status=, self.mul_r(multiplicand, round));
-
- // FS can only be Status::OK or Status::INVALID_OP. There is no more work
- // to do in the latter case. The IEEE-754R standard says it is
- // implementation-defined in this case whether, if ADDEND is a
- // quiet NaN, we raise invalid op; this implementation does so.
- //
- // If we need to do the addition we can do so with normal
- // precision.
- if status == Status::OK {
- self = unpack!(status=, self.add_r(addend, round));
- }
- return status.and(self);
- }
-
- // Post-multiplication sign, before addition.
- self.sign ^= multiplicand.sign;
-
- // Allocate space for twice as many bits as the original significand, plus one
- // extra bit for the addition to overflow into.
- assert!(limbs_for_bits(S::PRECISION * 2 + 1) <= 2);
- let mut wide_sig = sig::widening_mul(self.sig[0], multiplicand.sig[0]);
-
- let mut loss = Loss::ExactlyZero;
- let mut omsb = sig::omsb(&wide_sig);
- self.exp += multiplicand.exp;
-
- // Assume the operands involved in the multiplication are single-precision
-        // FP, and the two multiplicands are:
- // lhs = a23 . a22 ... a0 * 2^e1
- // rhs = b23 . b22 ... b0 * 2^e2
- // the result of multiplication is:
- // lhs = c48 c47 c46 . c45 ... c0 * 2^(e1+e2)
- // Note that there are three significant bits at the left-hand side of the
- // radix point: two for the multiplication, and an overflow bit for the
- // addition (that will always be zero at this point). Move the radix point
- // toward left by two bits, and adjust exponent accordingly.
- self.exp += 2;
-
- if addend.is_non_zero() {
- // Normalize our MSB to one below the top bit to allow for overflow.
- let ext_precision = 2 * S::PRECISION + 1;
- if omsb != ext_precision - 1 {
- assert!(ext_precision > omsb);
- sig::shift_left(&mut wide_sig, &mut self.exp, (ext_precision - 1) - omsb);
- }
-
- // The intermediate result of the multiplication has "2 * S::PRECISION"
-            // significant bits; adjust the addend to be consistent with mul result.
- let mut ext_addend_sig = [addend.sig[0], 0];
-
- // Extend the addend significand to ext_precision - 1. This guarantees
- // that the high bit of the significand is zero (same as wide_sig),
- // so the addition will overflow (if it does overflow at all) into the top bit.
- sig::shift_left(&mut ext_addend_sig, &mut 0, ext_precision - 1 - S::PRECISION);
- loss = sig::add_or_sub(
- &mut wide_sig,
- &mut self.exp,
- &mut self.sign,
- &mut ext_addend_sig,
- addend.exp + 1,
- addend.sign,
- );
-
- omsb = sig::omsb(&wide_sig);
- }
-
- // Convert the result having "2 * S::PRECISION" significant-bits back to the one
- // having "S::PRECISION" significant-bits. First, move the radix point from
-        // position "2*S::PRECISION - 1" to "S::PRECISION - 1". The exponent needs to be
- // adjusted by "2*S::PRECISION - 1" - "S::PRECISION - 1" = "S::PRECISION".
- self.exp -= S::PRECISION as ExpInt + 1;
-
- // In case MSB resides at the left-hand side of radix point, shift the
- // mantissa right by some amount to make sure the MSB reside right before
- // the radix point (i.e., "MSB . rest-significant-bits").
- if omsb > S::PRECISION {
- let bits = omsb - S::PRECISION;
- loss = sig::shift_right(&mut wide_sig, &mut self.exp, bits).combine(loss);
- }
-
- self.sig[0] = wide_sig[0];
-
- let mut status;
- self = unpack!(status=, self.normalize(round, loss));
- if loss != Loss::ExactlyZero {
- status |= Status::INEXACT;
- }
-
- // If two numbers add (exactly) to zero, IEEE 754 decrees it is a
- // positive zero unless rounding to minus infinity, except that
- // adding two like-signed zeroes gives that zero.
- if self.category == Category::Zero
- && !status.intersects(Status::UNDERFLOW)
- && self.sign != addend.sign
- {
- self.sign = round == Round::TowardNegative;
- }
-
- status.and(self)
- }
-
- fn div_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
- self.sign ^= rhs.sign;
-
- match (self.category, rhs.category) {
- (Category::NaN, _) => {
- self.sign = false;
- Status::OK.and(self)
- }
-
- (_, Category::NaN) => {
- self.category = Category::NaN;
- self.sig = rhs.sig;
- self.sign = false;
- Status::OK.and(self)
- }
-
- (Category::Infinity, Category::Infinity) | (Category::Zero, Category::Zero) => {
- Status::INVALID_OP.and(Self::NAN)
- }
-
- (Category::Infinity | Category::Zero, _) => Status::OK.and(self),
-
- (Category::Normal, Category::Infinity) => {
- self.category = Category::Zero;
- Status::OK.and(self)
- }
-
- (Category::Normal, Category::Zero) => {
- self.category = Category::Infinity;
- Status::DIV_BY_ZERO.and(self)
- }
-
- (Category::Normal, Category::Normal) => {
- self.exp -= rhs.exp;
- let dividend = self.sig[0];
- let loss = sig::div(
- &mut self.sig,
- &mut self.exp,
- &mut [dividend],
- &mut [rhs.sig[0]],
- S::PRECISION,
- );
- let mut status;
- self = unpack!(status=, self.normalize(round, loss));
- if loss != Loss::ExactlyZero {
- status |= Status::INEXACT;
- }
- status.and(self)
- }
- }
- }
-
- fn c_fmod(mut self, rhs: Self) -> StatusAnd<Self> {
- match (self.category, rhs.category) {
- (Category::NaN, _)
- | (Category::Zero, Category::Infinity | Category::Normal)
- | (Category::Normal, Category::Infinity) => Status::OK.and(self),
-
- (_, Category::NaN) => {
- self.sign = false;
- self.category = Category::NaN;
- self.sig = rhs.sig;
- Status::OK.and(self)
- }
-
- (Category::Infinity, _) | (_, Category::Zero) => Status::INVALID_OP.and(Self::NAN),
-
- (Category::Normal, Category::Normal) => {
- while self.is_finite_non_zero()
- && rhs.is_finite_non_zero()
- && self.cmp_abs_normal(rhs) != Ordering::Less
- {
- let mut v = rhs.scalbn(self.ilogb() - rhs.ilogb());
- if self.cmp_abs_normal(v) == Ordering::Less {
- v = v.scalbn(-1);
- }
- v.sign = self.sign;
-
- let status;
- self = unpack!(status=, self - v);
- assert_eq!(status, Status::OK);
- }
- Status::OK.and(self)
- }
- }
- }
-
- fn round_to_integral(self, round: Round) -> StatusAnd<Self> {
- // If the exponent is large enough, we know that this value is already
- // integral, and the arithmetic below would potentially cause it to saturate
- // to +/-Inf. Bail out early instead.
- if self.is_finite_non_zero() && self.exp + 1 >= S::PRECISION as ExpInt {
- return Status::OK.and(self);
- }
-
- // The algorithm here is quite simple: we add 2^(p-1), where p is the
- // precision of our format, and then subtract it back off again. The choice
- // of rounding modes for the addition/subtraction determines the rounding mode
- // for our integral rounding as well.
- // NOTE: When the input value is negative, we do subtraction followed by
- // addition instead.
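    // Illustration (not part of the deleted file): for `Double`, the magic constant
    // is 2^52 = 4503599627370496. Rounding 2.7 to nearest: 2.7 + 2^52 rounds to
    // 4503599627370499.0 because the spacing of representable doubles at that
    // magnitude is exactly 1.0; subtracting 2^52 back off leaves 3.0, the correctly
    // rounded integer.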
- assert!(S::PRECISION <= 128);
- let mut status;
- let magic_const = unpack!(status=, Self::from_u128(1 << (S::PRECISION - 1)));
- let magic_const = magic_const.copy_sign(self);
-
- if status != Status::OK {
- return status.and(self);
- }
-
- let mut r = self;
- r = unpack!(status=, r.add_r(magic_const, round));
- if status != Status::OK && status != Status::INEXACT {
- return status.and(self);
- }
-
- // Restore the input sign to handle 0.0/-0.0 cases correctly.
- r.sub_r(magic_const, round).map(|r| r.copy_sign(self))
- }
-
- fn next_up(mut self) -> StatusAnd<Self> {
- // Compute nextUp(x), handling each float category separately.
- match self.category {
- Category::Infinity => {
- if self.sign {
- // nextUp(-inf) = -largest
- Status::OK.and(-Self::largest())
- } else {
- // nextUp(+inf) = +inf
- Status::OK.and(self)
- }
- }
- Category::NaN => {
- // IEEE-754R 2008 6.2 Par 2: nextUp(sNaN) = qNaN. Set Invalid flag.
- // IEEE-754R 2008 6.2: nextUp(qNaN) = qNaN. Must be identity so we do not
- // change the payload.
- if self.is_signaling() {
- // For consistency, propagate the sign of the sNaN to the qNaN.
- Status::INVALID_OP.and(Self::NAN.copy_sign(self))
- } else {
- Status::OK.and(self)
- }
- }
- Category::Zero => {
- // nextUp(pm 0) = +smallest
- Status::OK.and(Self::SMALLEST)
- }
- Category::Normal => {
- // nextUp(-smallest) = -0
- if self.is_smallest() && self.sign {
- return Status::OK.and(-Self::ZERO);
- }
-
- // nextUp(largest) == INFINITY
- if self.is_largest() && !self.sign {
- return Status::OK.and(Self::INFINITY);
- }
-
- // Excluding the integral bit. This allows us to test for binade boundaries.
- let sig_mask = (1 << (S::PRECISION - 1)) - 1;
-
- // nextUp(normal) == normal + inc.
- if self.sign {
- // If we are negative, we need to decrement the significand.
-
- // We only cross a binade boundary that requires adjusting the exponent
- // if:
- // 1. exponent != S::MIN_EXP. This implies we are not in the
- // smallest binade or are dealing with denormals.
- // 2. Our significand excluding the integral bit is all zeros.
- let crossing_binade_boundary =
- self.exp != S::MIN_EXP && self.sig[0] & sig_mask == 0;
-
- // Decrement the significand.
- //
- // We always do this since:
- // 1. If we are dealing with a non-binade decrement, by definition we
- // just decrement the significand.
- // 2. If we are dealing with a normal -> normal binade decrement, since
- // we have an explicit integral bit the fact that all bits but the
- // integral bit are zero implies that subtracting one will yield a
- // significand with 0 integral bit and 1 in all other spots. Thus we
- // must just adjust the exponent and set the integral bit to 1.
- // 3. If we are dealing with a normal -> denormal binade decrement,
- // since we set the integral bit to 0 when we represent denormals, we
- // just decrement the significand.
- sig::decrement(&mut self.sig);
-
- if crossing_binade_boundary {
- // Our result is a normal number. Do the following:
- // 1. Set the integral bit to 1.
- // 2. Decrement the exponent.
- sig::set_bit(&mut self.sig, S::PRECISION - 1);
- self.exp -= 1;
- }
- } else {
- // If we are positive, we need to increment the significand.
-
- // We only cross a binade boundary that requires adjusting the exponent if
- // the input is not a denormal and all of said input's significand bits
- // are set. If all of said conditions are true: clear the significand, set
- // the integral bit to 1, and increment the exponent. If we have a
- // denormal always increment since moving denormals and the numbers in the
- // smallest normal binade have the same exponent in our representation.
- let crossing_binade_boundary =
- !self.is_denormal() && self.sig[0] & sig_mask == sig_mask;
-
- if crossing_binade_boundary {
- self.sig = [0];
- sig::set_bit(&mut self.sig, S::PRECISION - 1);
- assert_ne!(
- self.exp,
- S::MAX_EXP,
- "We can not increment an exponent beyond the MAX_EXP \
- allowed by the given floating point semantics."
- );
- self.exp += 1;
- } else {
- sig::increment(&mut self.sig);
- }
- }
- Status::OK.and(self)
- }
- }
- }
-
- fn from_bits(input: u128) -> Self {
- // Dispatch to semantics.
- S::from_bits(input)
- }
-
- fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self> {
- IeeeFloat {
- sig: [input],
- exp: S::PRECISION as ExpInt - 1,
- category: Category::Normal,
- sign: false,
- marker: PhantomData,
- }
- .normalize(round, Loss::ExactlyZero)
- }
-
- fn from_str_r(mut s: &str, mut round: Round) -> Result<StatusAnd<Self>, ParseError> {
- if s.is_empty() {
- return Err(ParseError("Invalid string length"));
- }
-
- // Handle special cases.
- match s {
- "inf" | "INFINITY" => return Ok(Status::OK.and(Self::INFINITY)),
- "-inf" | "-INFINITY" => return Ok(Status::OK.and(-Self::INFINITY)),
- "nan" | "NaN" => return Ok(Status::OK.and(Self::NAN)),
- "-nan" | "-NaN" => return Ok(Status::OK.and(-Self::NAN)),
- _ => {}
- }
-
- // Handle a leading minus sign.
- let minus = s.starts_with('-');
- if minus || s.starts_with('+') {
- s = &s[1..];
- if s.is_empty() {
- return Err(ParseError("String has no digits"));
- }
- }
-
- // Adjust the rounding mode for the absolute value below.
- if minus {
- round = -round;
- }
-
- let r = if s.starts_with("0x") || s.starts_with("0X") {
- s = &s[2..];
- if s.is_empty() {
- return Err(ParseError("Invalid string"));
- }
- Self::from_hexadecimal_string(s, round)?
- } else {
- Self::from_decimal_string(s, round)?
- };
-
- Ok(r.map(|r| if minus { -r } else { r }))
- }
-
- fn to_bits(self) -> u128 {
- // Dispatch to semantics.
- S::to_bits(self)
- }
-
- fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128> {
- // The result of trying to convert a number too large.
- let overflow = if self.sign {
- // Negative numbers cannot be represented as unsigned.
- 0
- } else {
- // Largest unsigned integer of the given width.
- !0 >> (128 - width)
- };
-
- *is_exact = false;
-
- match self.category {
- Category::NaN => Status::INVALID_OP.and(0),
-
- Category::Infinity => Status::INVALID_OP.and(overflow),
-
- Category::Zero => {
- // Negative zero can't be represented as an int.
- *is_exact = !self.sign;
- Status::OK.and(0)
- }
-
- Category::Normal => {
- let mut r = 0;
-
- // Step 1: place our absolute value, with any fraction truncated, in
- // the destination.
- let truncated_bits = if self.exp < 0 {
- // Our absolute value is less than one; truncate everything.
- // For exponent -1 the integer bit represents .5, look at that.
- // For smaller exponents leftmost truncated bit is 0.
- S::PRECISION - 1 + (-self.exp) as usize
- } else {
- // We want the most significant (exponent + 1) bits; the rest are
- // truncated.
- let bits = self.exp as usize + 1;
-
- // Hopelessly large in magnitude?
- if bits > width {
- return Status::INVALID_OP.and(overflow);
- }
-
- if bits < S::PRECISION {
- // We truncate (S::PRECISION - bits) bits.
- r = self.sig[0] >> (S::PRECISION - bits);
- S::PRECISION - bits
- } else {
- // We want at least as many bits as are available.
- r = self.sig[0] << (bits - S::PRECISION);
- 0
- }
- };
-
- // Step 2: work out any lost fraction, and increment the absolute
- // value if we would round away from zero.
- let mut loss = Loss::ExactlyZero;
- if truncated_bits > 0 {
- loss = Loss::through_truncation(&self.sig, truncated_bits);
- if loss != Loss::ExactlyZero
- && self.round_away_from_zero(round, loss, truncated_bits)
- {
- r = r.wrapping_add(1);
- if r == 0 {
- return Status::INVALID_OP.and(overflow); // Overflow.
- }
- }
- }
-
- // Step 3: check if we fit in the destination.
- if r > overflow {
- return Status::INVALID_OP.and(overflow);
- }
-
- if loss == Loss::ExactlyZero {
- *is_exact = true;
- Status::OK.and(r)
- } else {
- Status::INEXACT.and(r)
- }
- }
- }
- }
-
- fn cmp_abs_normal(self, rhs: Self) -> Ordering {
- assert!(self.is_finite_non_zero());
- assert!(rhs.is_finite_non_zero());
-
- // If exponents are equal, do an unsigned comparison of the significands.
- self.exp.cmp(&rhs.exp).then_with(|| sig::cmp(&self.sig, &rhs.sig))
- }
-
- fn bitwise_eq(self, rhs: Self) -> bool {
- if self.category != rhs.category || self.sign != rhs.sign {
- return false;
- }
-
- if self.category == Category::Zero || self.category == Category::Infinity {
- return true;
- }
-
- if self.is_finite_non_zero() && self.exp != rhs.exp {
- return false;
- }
-
- self.sig == rhs.sig
- }
-
- fn is_negative(self) -> bool {
- self.sign
- }
-
- fn is_denormal(self) -> bool {
- self.is_finite_non_zero()
- && self.exp == S::MIN_EXP
- && !sig::get_bit(&self.sig, S::PRECISION - 1)
- }
-
- fn is_signaling(self) -> bool {
- // IEEE-754R 2008 6.2.1: A signaling NaN bit string should be encoded with the
- // first bit of the trailing significand being 0.
- self.is_nan() && !sig::get_bit(&self.sig, S::QNAN_BIT)
- }
-
- fn category(self) -> Category {
- self.category
- }
-
- fn get_exact_inverse(self) -> Option<Self> {
- // Special floats and denormals have no exact inverse.
- if !self.is_finite_non_zero() {
- return None;
- }
-
- // Check that the number is a power of two by making sure that only the
- // integer bit is set in the significand.
- if self.sig != [1 << (S::PRECISION - 1)] {
- return None;
- }
-
- // Get the inverse.
- let mut reciprocal = Self::from_u128(1).value;
- let status;
- reciprocal = unpack!(status=, reciprocal / self);
- if status != Status::OK {
- return None;
- }
-
- // Avoid multiplication with a denormal, it is not safe on all platforms and
- // may be slower than a normal division.
- if reciprocal.is_denormal() {
- return None;
- }
-
- assert!(reciprocal.is_finite_non_zero());
- assert_eq!(reciprocal.sig, [1 << (S::PRECISION - 1)]);
-
- Some(reciprocal)
- }
-
- fn ilogb(mut self) -> ExpInt {
- if self.is_nan() {
- return IEK_NAN;
- }
- if self.is_zero() {
- return IEK_ZERO;
- }
- if self.is_infinite() {
- return IEK_INF;
- }
- if !self.is_denormal() {
- return self.exp;
- }
-
- let sig_bits = (S::PRECISION - 1) as ExpInt;
- self.exp += sig_bits;
- self = self.normalize(Round::NearestTiesToEven, Loss::ExactlyZero).value;
- self.exp - sig_bits
- }
-
- fn scalbn_r(mut self, exp: ExpInt, round: Round) -> Self {
- // If exp is wildly out-of-scale, simply adding it to self.exp will
- // overflow; clamp it to a safe range before adding, but ensure that the range
- // is large enough that the clamp does not change the result. The range we
- // need to support is the difference between the largest possible exponent and
- // the normalized exponent of half the smallest denormal.
-
- let sig_bits = (S::PRECISION - 1) as i32;
- let max_change = S::MAX_EXP as i32 - (S::MIN_EXP as i32 - sig_bits) + 1;
-
- // Clamp to one past the range ends to let normalize handle overflow.
- let exp_change = cmp::min(cmp::max(exp as i32, -max_change - 1), max_change);
- self.exp = self.exp.saturating_add(exp_change as ExpInt);
- self = self.normalize(round, Loss::ExactlyZero).value;
- if self.is_nan() {
- sig::set_bit(&mut self.sig, S::QNAN_BIT);
- }
- self
- }
-
- fn frexp_r(mut self, exp: &mut ExpInt, round: Round) -> Self {
- *exp = self.ilogb();
-
- // Quiet signalling nans.
- if *exp == IEK_NAN {
- sig::set_bit(&mut self.sig, S::QNAN_BIT);
- return self;
- }
-
- if *exp == IEK_INF {
- return self;
- }
-
- // 1 is added because frexp is defined to return a normalized fraction in
- // +/-[0.5, 1.0), rather than the usual +/-[1.0, 2.0).
- if *exp == IEK_ZERO {
- *exp = 0;
- } else {
- *exp += 1;
- }
- self.scalbn_r(-*exp, round)
- }
-}
-
-impl<S: Semantics, T: Semantics> FloatConvert<IeeeFloat<T>> for IeeeFloat<S> {
- fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd<IeeeFloat<T>> {
- let mut r = IeeeFloat {
- sig: self.sig,
- exp: self.exp,
- category: self.category,
- sign: self.sign,
- marker: PhantomData,
- };
-
- // x86 has some unusual NaNs which cannot be represented in any other
- // format; note them here.
- fn is_x87_double_extended<S: Semantics>() -> bool {
- S::QNAN_SIGNIFICAND == X87DoubleExtendedS::QNAN_SIGNIFICAND
- }
- let x87_special_nan = is_x87_double_extended::<S>()
- && !is_x87_double_extended::<T>()
- && r.category == Category::NaN
- && (r.sig[0] & S::QNAN_SIGNIFICAND) != S::QNAN_SIGNIFICAND;
-
- // If this is a truncation of a denormal number, and the target semantics
- // has larger exponent range than the source semantics (this can happen
- // when truncating from PowerPC double-double to double format), the
- // right shift could lose result mantissa bits. Adjust exponent instead
- // of performing excessive shift.
- let mut shift = T::PRECISION as ExpInt - S::PRECISION as ExpInt;
- if shift < 0 && r.is_finite_non_zero() {
- let mut exp_change = sig::omsb(&r.sig) as ExpInt - S::PRECISION as ExpInt;
- if r.exp + exp_change < T::MIN_EXP {
- exp_change = T::MIN_EXP - r.exp;
- }
- if exp_change < shift {
- exp_change = shift;
- }
- if exp_change < 0 {
- shift -= exp_change;
- r.exp += exp_change;
- }
- }
-
- // If this is a truncation, perform the shift.
- let loss = if shift < 0 && (r.is_finite_non_zero() || r.category == Category::NaN) {
- sig::shift_right(&mut r.sig, &mut 0, -shift as usize)
- } else {
- Loss::ExactlyZero
- };
-
- // If this is an extension, perform the shift.
- if shift > 0 && (r.is_finite_non_zero() || r.category == Category::NaN) {
- sig::shift_left(&mut r.sig, &mut 0, shift as usize);
- }
-
- let status;
- if r.is_finite_non_zero() {
- r = unpack!(status=, r.normalize(round, loss));
- *loses_info = status != Status::OK;
- } else if r.category == Category::NaN {
- *loses_info = loss != Loss::ExactlyZero || x87_special_nan;
-
- // For x87 extended precision, we want to make a NaN, not a special NaN if
- // the input wasn't special either.
- if !x87_special_nan && is_x87_double_extended::<T>() {
- sig::set_bit(&mut r.sig, T::PRECISION - 1);
- }
-
- // Convert of sNaN creates qNaN and raises an exception (invalid op).
- // This also guarantees that a sNaN does not become Inf on a truncation
- // that loses all payload bits.
- if self.is_signaling() {
- // Quiet signaling NaN.
- sig::set_bit(&mut r.sig, T::QNAN_BIT);
- status = Status::INVALID_OP;
- } else {
- status = Status::OK;
- }
- } else {
- *loses_info = false;
- status = Status::OK;
- }
-
- status.and(r)
- }
-}
-
-impl<S: Semantics> IeeeFloat<S> {
- /// Handle positive overflow. We either return infinity or
- /// the largest finite number. For negative overflow,
- /// negate the `round` argument before calling.
- fn overflow_result(round: Round) -> StatusAnd<Self> {
- match round {
- // Infinity?
- Round::NearestTiesToEven | Round::NearestTiesToAway | Round::TowardPositive => {
- (Status::OVERFLOW | Status::INEXACT).and(Self::INFINITY)
- }
- // Otherwise we become the largest finite number.
- Round::TowardNegative | Round::TowardZero => Status::INEXACT.and(Self::largest()),
- }
- }
-
- /// Returns `true` if, when truncating the current number, with `bit` the
- /// new LSB, with the given lost fraction and rounding mode, the result
- /// would need to be rounded away from zero (i.e., by increasing the
- // significand). This routine must work for `Category::Zero` of both signs, and
- /// `Category::Normal` numbers.
- fn round_away_from_zero(&self, round: Round, loss: Loss, bit: usize) -> bool {
- // NaNs and infinities should not have lost fractions.
- assert!(self.is_finite_non_zero() || self.is_zero());
-
- // Current callers never pass this so we don't handle it.
- assert_ne!(loss, Loss::ExactlyZero);
-
- match round {
- Round::NearestTiesToAway => loss == Loss::ExactlyHalf || loss == Loss::MoreThanHalf,
- Round::NearestTiesToEven => {
- if loss == Loss::MoreThanHalf {
- return true;
- }
-
- // Our zeros don't have a significand to test.
- if loss == Loss::ExactlyHalf && self.category != Category::Zero {
- return sig::get_bit(&self.sig, bit);
- }
-
- false
- }
- Round::TowardZero => false,
- Round::TowardPositive => !self.sign,
- Round::TowardNegative => self.sign,
- }
- }
-
- fn normalize(mut self, round: Round, mut loss: Loss) -> StatusAnd<Self> {
- if !self.is_finite_non_zero() {
- return Status::OK.and(self);
- }
-
- // Before rounding normalize the exponent of Category::Normal numbers.
- let mut omsb = sig::omsb(&self.sig);
-
- if omsb > 0 {
- // OMSB is numbered from 1. We want to place it in the integer
- // bit numbered PRECISION if possible, with a compensating change in
- // the exponent.
- let mut final_exp = self.exp.saturating_add(omsb as ExpInt - S::PRECISION as ExpInt);
-
- // If the resulting exponent is too high, overflow according to
- // the rounding mode.
- if final_exp > S::MAX_EXP {
- let round = if self.sign { -round } else { round };
- return Self::overflow_result(round).map(|r| r.copy_sign(self));
- }
-
- // Subnormal numbers have exponent MIN_EXP, and their MSB
- // is forced based on that.
- if final_exp < S::MIN_EXP {
- final_exp = S::MIN_EXP;
- }
-
- // Shifting left is easy as we don't lose precision.
- if final_exp < self.exp {
- assert_eq!(loss, Loss::ExactlyZero);
-
- let exp_change = (self.exp - final_exp) as usize;
- sig::shift_left(&mut self.sig, &mut self.exp, exp_change);
-
- return Status::OK.and(self);
- }
-
- // Shift right and capture any new lost fraction.
- if final_exp > self.exp {
- let exp_change = (final_exp - self.exp) as usize;
- loss = sig::shift_right(&mut self.sig, &mut self.exp, exp_change).combine(loss);
-
- // Keep OMSB up-to-date.
- omsb = omsb.saturating_sub(exp_change);
- }
- }
-
- // Now round the number according to round given the lost
- // fraction.
-
- // As specified in IEEE 754, since we do not trap we do not report
- // underflow for exact results.
- if loss == Loss::ExactlyZero {
- // Canonicalize zeros.
- if omsb == 0 {
- self.category = Category::Zero;
- }
-
- return Status::OK.and(self);
- }
-
- // Increment the significand if we're rounding away from zero.
- if self.round_away_from_zero(round, loss, 0) {
- if omsb == 0 {
- self.exp = S::MIN_EXP;
- }
-
- // We should never overflow.
- assert_eq!(sig::increment(&mut self.sig), 0);
- omsb = sig::omsb(&self.sig);
-
- // Did the significand increment overflow?
- if omsb == S::PRECISION + 1 {
- // Renormalize by incrementing the exponent and shifting our
- // significand right one. However if we already have the
- // maximum exponent we overflow to infinity.
- if self.exp == S::MAX_EXP {
- self.category = Category::Infinity;
-
- return (Status::OVERFLOW | Status::INEXACT).and(self);
- }
-
- let _: Loss = sig::shift_right(&mut self.sig, &mut self.exp, 1);
-
- return Status::INEXACT.and(self);
- }
- }
-
- // The normal case - we were and are not denormal, and any
- // significand increment above didn't overflow.
- if omsb == S::PRECISION {
- return Status::INEXACT.and(self);
- }
-
- // We have a non-zero denormal.
- assert!(omsb < S::PRECISION);
-
- // Canonicalize zeros.
- if omsb == 0 {
- self.category = Category::Zero;
- }
-
- // The Category::Zero case is a denormal that underflowed to zero.
- (Status::UNDERFLOW | Status::INEXACT).and(self)
- }
-
- fn from_hexadecimal_string(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
- let mut r = IeeeFloat {
- sig: [0],
- exp: 0,
- category: Category::Normal,
- sign: false,
- marker: PhantomData,
- };
-
- let mut any_digits = false;
- let mut has_exp = false;
- let mut bit_pos = LIMB_BITS as isize;
- let mut loss = None;
-
- // Without leading or trailing zeros, irrespective of the dot.
- let mut first_sig_digit = None;
- let mut dot = s.len();
-
- for (p, c) in s.char_indices() {
- // Skip leading zeros and any (hexa)decimal point.
- if c == '.' {
- if dot != s.len() {
- return Err(ParseError("String contains multiple dots"));
- }
- dot = p;
- } else if let Some(hex_value) = c.to_digit(16) {
- any_digits = true;
-
- if first_sig_digit.is_none() {
- if hex_value == 0 {
- continue;
- }
- first_sig_digit = Some(p);
- }
-
- // Store the number while we have space.
- bit_pos -= 4;
- if bit_pos >= 0 {
- r.sig[0] |= (hex_value as Limb) << bit_pos;
- // If zero or one-half (the hexadecimal digit 8) are followed
- // by non-zero, they're a little more than zero or one-half.
- } else if let Some(ref mut loss) = loss {
- if hex_value != 0 {
- if *loss == Loss::ExactlyZero {
- *loss = Loss::LessThanHalf;
- }
- if *loss == Loss::ExactlyHalf {
- *loss = Loss::MoreThanHalf;
- }
- }
- } else {
- loss = Some(match hex_value {
- 0 => Loss::ExactlyZero,
- 1..=7 => Loss::LessThanHalf,
- 8 => Loss::ExactlyHalf,
- 9..=15 => Loss::MoreThanHalf,
- _ => unreachable!(),
- });
- }
- } else if c == 'p' || c == 'P' {
- if !any_digits {
- return Err(ParseError("Significand has no digits"));
- }
-
- if dot == s.len() {
- dot = p;
- }
-
- let mut chars = s[p + 1..].chars().peekable();
-
- // Adjust for the given exponent.
- let exp_minus = chars.peek() == Some(&'-');
- if exp_minus || chars.peek() == Some(&'+') {
- chars.next();
- }
-
- for c in chars {
- if let Some(value) = c.to_digit(10) {
- has_exp = true;
- r.exp = r.exp.saturating_mul(10).saturating_add(value as ExpInt);
- } else {
- return Err(ParseError("Invalid character in exponent"));
- }
- }
- if !has_exp {
- return Err(ParseError("Exponent has no digits"));
- }
-
- if exp_minus {
- r.exp = -r.exp;
- }
-
- break;
- } else {
- return Err(ParseError("Invalid character in significand"));
- }
- }
- if !any_digits {
- return Err(ParseError("Significand has no digits"));
- }
-
- // Hex floats require an exponent but not a hexadecimal point.
- if !has_exp {
- return Err(ParseError("Hex strings require an exponent"));
- }
-
- // Ignore the exponent if we are zero.
- let first_sig_digit = match first_sig_digit {
- Some(p) => p,
- None => return Ok(Status::OK.and(Self::ZERO)),
- };
-
- // Calculate the exponent adjustment implicit in the number of
- // significant digits and adjust for writing the significand starting
- // at the most significant nibble.
- let exp_adjustment = if dot > first_sig_digit {
- ExpInt::try_from(dot - first_sig_digit).unwrap()
- } else {
- -ExpInt::try_from(first_sig_digit - dot - 1).unwrap()
- };
- let exp_adjustment = exp_adjustment
- .saturating_mul(4)
- .saturating_sub(1)
- .saturating_add(S::PRECISION as ExpInt)
- .saturating_sub(LIMB_BITS as ExpInt);
- r.exp = r.exp.saturating_add(exp_adjustment);
-
- Ok(r.normalize(round, loss.unwrap_or(Loss::ExactlyZero)))
- }
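// A worked example of the format handled above (illustrative; it assumes the
// caller has already stripped any sign and "0x" prefix, which this routine
// itself would reject): "1.8p1" denotes 0x1.8 * 2^1 = 1.5 * 2 = 3.0, and
// "8p-2" denotes 8 * 2^-2 = 2.0. The (hexa)decimal point is optional, but the
// 'p'/'P' exponent is mandatory, as enforced above.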
-
- fn from_decimal_string(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
- // Given a normal decimal floating point number of the form
- //
- // dddd.dddd[eE][+-]ddd
- //
- // where the decimal point and exponent are optional, fill out the
- // variables below. `dec_exp` is appropriate if the significand is
- // treated as an integer, and `normalized_exp` if the significand
- // is taken to have the decimal point after a single leading
- // non-zero digit.
- //
- // If the value is zero, first_sig_digit is None.
-
- let mut any_digits = false;
- let mut dec_exp = 0i32;
-
- // Without leading or trailing zeros, irrespective of the dot.
- let mut first_sig_digit = None;
- let mut last_sig_digit = 0;
- let mut dot = s.len();
-
- for (p, c) in s.char_indices() {
- if c == '.' {
- if dot != s.len() {
- return Err(ParseError("String contains multiple dots"));
- }
- dot = p;
- } else if let Some(dec_value) = c.to_digit(10) {
- any_digits = true;
-
- if dec_value != 0 {
- if first_sig_digit.is_none() {
- first_sig_digit = Some(p);
- }
- last_sig_digit = p;
- }
- } else if c == 'e' || c == 'E' {
- if !any_digits {
- return Err(ParseError("Significand has no digits"));
- }
-
- if dot == s.len() {
- dot = p;
- }
-
- let mut chars = s[p + 1..].chars().peekable();
-
- // Adjust for the given exponent.
- let exp_minus = chars.peek() == Some(&'-');
- if exp_minus || chars.peek() == Some(&'+') {
- chars.next();
- }
-
- any_digits = false;
- for c in chars {
- if let Some(value) = c.to_digit(10) {
- any_digits = true;
- dec_exp = dec_exp.saturating_mul(10).saturating_add(value as i32);
- } else {
- return Err(ParseError("Invalid character in exponent"));
- }
- }
- if !any_digits {
- return Err(ParseError("Exponent has no digits"));
- }
-
- if exp_minus {
- dec_exp = -dec_exp;
- }
-
- break;
- } else {
- return Err(ParseError("Invalid character in significand"));
- }
- }
- if !any_digits {
- return Err(ParseError("Significand has no digits"));
- }
-
- // Test if we have a zero number allowing for non-zero exponents.
- let first_sig_digit = match first_sig_digit {
- Some(p) => p,
- None => return Ok(Status::OK.and(Self::ZERO)),
- };
-
- // Adjust the exponents for any decimal point.
- if dot > last_sig_digit {
- dec_exp = dec_exp.saturating_add((dot - last_sig_digit - 1) as i32);
- } else {
- dec_exp = dec_exp.saturating_sub((last_sig_digit - dot) as i32);
- }
- let significand_digits = last_sig_digit - first_sig_digit + 1
- - (dot > first_sig_digit && dot < last_sig_digit) as usize;
- let normalized_exp = dec_exp.saturating_add(significand_digits as i32 - 1);
-
- // Handle the cases where exponents are obviously too large or too
- // small. Writing L for log 10 / log 2, a number d.ddddd*10^dec_exp
- // definitely overflows if
- //
- // (dec_exp - 1) * L >= MAX_EXP
- //
- // and definitely underflows to zero where
- //
- // (dec_exp + 1) * L <= MIN_EXP - PRECISION
- //
- // With integer arithmetic the tightest bounds for L are
- //
- // 93/28 < L < 196/59 [ numerator <= 256 ]
- // 42039/12655 < L < 28738/8651 [ numerator <= 65536 ]
-
- // Check for MAX_EXP.
- if normalized_exp.saturating_sub(1).saturating_mul(42039) >= 12655 * S::MAX_EXP as i32 {
- // Overflow and round.
- return Ok(Self::overflow_result(round));
- }
-
- // Check for MIN_EXP.
- if normalized_exp.saturating_add(1).saturating_mul(28738)
- <= 8651 * (S::MIN_EXP as i32 - S::PRECISION as i32)
- {
- // Underflow to zero and round.
- let r =
- if round == Round::TowardPositive { IeeeFloat::SMALLEST } else { IeeeFloat::ZERO };
- return Ok((Status::UNDERFLOW | Status::INEXACT).and(r));
- }
-
- // A tight upper bound on number of bits required to hold an
- // N-digit decimal integer is N * 196 / 59. Allocate enough space
- // to hold the full significand, and an extra limb required by
- // tcMultiplyPart.
- let max_limbs = limbs_for_bits(1 + 196 * significand_digits / 59);
- let mut dec_sig: SmallVec<[Limb; 1]> = SmallVec::with_capacity(max_limbs);
-
- // Convert to binary efficiently - we do almost all multiplication
- // in a Limb. When this would overflow, we do a single
- // bignum multiplication, and then revert again to multiplication
- // in a Limb.
- let mut chars = s[first_sig_digit..=last_sig_digit].chars();
- loop {
- let mut val = 0;
- let mut multiplier = 1;
-
- loop {
- let dec_value = match chars.next() {
- Some('.') => continue,
- Some(c) => c.to_digit(10).unwrap(),
- None => break,
- };
-
- multiplier *= 10;
- val = val * 10 + dec_value as Limb;
-
- // The maximum number that can be multiplied by ten with any
- // digit added without overflowing a Limb.
- if multiplier > (!0 - 9) / 10 {
- break;
- }
- }
-
- // If we've consumed no digits, we're done.
- if multiplier == 1 {
- break;
- }
-
- // Multiply out the current limb.
- let mut carry = val;
- for x in &mut dec_sig {
- let [low, mut high] = sig::widening_mul(*x, multiplier);
-
- // Now add carry.
- let (low, overflow) = low.overflowing_add(carry);
- high += overflow as Limb;
-
- *x = low;
- carry = high;
- }
-
- // If we had carry, we need another limb (likely but not guaranteed).
- if carry > 0 {
- dec_sig.push(carry);
- }
- }
-
- // Calculate pow(5, abs(dec_exp)) into `pow5_full`.
- // The `*_calc` vectors are reused as scratch space, as an optimization.
- let (pow5_full, mut pow5_calc, mut sig_calc, mut sig_scratch_calc) = {
- let mut power = dec_exp.abs() as usize;
-
- const FIRST_EIGHT_POWERS: [Limb; 8] = [1, 5, 25, 125, 625, 3125, 15625, 78125];
-
- let mut p5_scratch = smallvec![];
- let mut p5: SmallVec<[Limb; 1]> = smallvec![FIRST_EIGHT_POWERS[4]];
-
- let mut r_scratch = smallvec![];
- let mut r: SmallVec<[Limb; 1]> = smallvec![FIRST_EIGHT_POWERS[power & 7]];
- power >>= 3;
-
- while power > 0 {
- // Calculate pow(5,pow(2,n+3)).
- p5_scratch.resize(p5.len() * 2, 0);
- let _: Loss = sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS);
- while p5_scratch.last() == Some(&0) {
- p5_scratch.pop();
- }
- mem::swap(&mut p5, &mut p5_scratch);
-
- if power & 1 != 0 {
- r_scratch.resize(r.len() + p5.len(), 0);
- let _: Loss =
- sig::mul(&mut r_scratch, &mut 0, &r, &p5, (r.len() + p5.len()) * LIMB_BITS);
- while r_scratch.last() == Some(&0) {
- r_scratch.pop();
- }
- mem::swap(&mut r, &mut r_scratch);
- }
-
- power >>= 1;
- }
-
- (r, r_scratch, p5, p5_scratch)
- };
-
- // Attempt dec_sig * 10^dec_exp with increasing precision.
- let mut attempt = 0;
- loop {
- let calc_precision = (LIMB_BITS << attempt) - 1;
- attempt += 1;
-
- let calc_normal_from_limbs = |sig: &mut SmallVec<[Limb; 1]>,
- limbs: &[Limb]|
- -> StatusAnd<ExpInt> {
- sig.resize(limbs_for_bits(calc_precision), 0);
- let (mut loss, mut exp) = sig::from_limbs(sig, limbs, calc_precision);
-
- // Before rounding normalize the exponent of Category::Normal numbers.
- let mut omsb = sig::omsb(sig);
-
- assert_ne!(omsb, 0);
-
- // OMSB is numbered from 1. We want to place it in the integer
- // bit numbered PRECISION if possible, with a compensating change in
- // the exponent.
- let final_exp = exp.saturating_add(omsb as ExpInt - calc_precision as ExpInt);
-
- // Shifting left is easy as we don't lose precision.
- if final_exp < exp {
- assert_eq!(loss, Loss::ExactlyZero);
-
- let exp_change = (exp - final_exp) as usize;
- sig::shift_left(sig, &mut exp, exp_change);
-
- return Status::OK.and(exp);
- }
-
- // Shift right and capture any new lost fraction.
- if final_exp > exp {
- let exp_change = (final_exp - exp) as usize;
- loss = sig::shift_right(sig, &mut exp, exp_change).combine(loss);
-
- // Keep OMSB up-to-date.
- omsb = omsb.saturating_sub(exp_change);
- }
-
- assert_eq!(omsb, calc_precision);
-
- // Now round the number according to round given the lost
- // fraction.
-
- // As specified in IEEE 754, since we do not trap we do not report
- // underflow for exact results.
- if loss == Loss::ExactlyZero {
- return Status::OK.and(exp);
- }
-
- // Increment the significand if we're rounding away from zero.
- if loss == Loss::MoreThanHalf || loss == Loss::ExactlyHalf && sig::get_bit(sig, 0) {
- // We should never overflow.
- assert_eq!(sig::increment(sig), 0);
- omsb = sig::omsb(sig);
-
- // Did the significand increment overflow?
- if omsb == calc_precision + 1 {
- let _: Loss = sig::shift_right(sig, &mut exp, 1);
-
- return Status::INEXACT.and(exp);
- }
- }
-
- // The normal case - we were and are not denormal, and any
- // significand increment above didn't overflow.
- Status::INEXACT.and(exp)
- };
-
- let status;
- let mut exp = unpack!(status=,
- calc_normal_from_limbs(&mut sig_calc, &dec_sig));
- let pow5_status;
- let pow5_exp = unpack!(pow5_status=,
- calc_normal_from_limbs(&mut pow5_calc, &pow5_full));
-
- // Add dec_exp, as 10^n = 5^n * 2^n.
- exp += dec_exp as ExpInt;
-
- let mut used_bits = S::PRECISION;
- let mut truncated_bits = calc_precision - used_bits;
-
- let half_ulp_err1 = (status != Status::OK) as Limb;
- let (calc_loss, half_ulp_err2);
- if dec_exp >= 0 {
- exp += pow5_exp;
-
- sig_scratch_calc.resize(sig_calc.len() + pow5_calc.len(), 0);
- calc_loss = sig::mul(
- &mut sig_scratch_calc,
- &mut exp,
- &sig_calc,
- &pow5_calc,
- calc_precision,
- );
- mem::swap(&mut sig_calc, &mut sig_scratch_calc);
-
- half_ulp_err2 = (pow5_status != Status::OK) as Limb;
- } else {
- exp -= pow5_exp;
-
- sig_scratch_calc.resize(sig_calc.len(), 0);
- calc_loss = sig::div(
- &mut sig_scratch_calc,
- &mut exp,
- &mut sig_calc,
- &mut pow5_calc,
- calc_precision,
- );
- mem::swap(&mut sig_calc, &mut sig_scratch_calc);
-
- // Denormal numbers have less precision.
- if exp < S::MIN_EXP {
- truncated_bits += (S::MIN_EXP - exp) as usize;
- used_bits = calc_precision.saturating_sub(truncated_bits);
- }
- // Extra half-ulp lost in reciprocal of exponent.
- half_ulp_err2 =
- 2 * (pow5_status != Status::OK || calc_loss != Loss::ExactlyZero) as Limb;
- }
-
- // Both sig::mul and sig::div return the
- // result with the integer bit set.
- assert!(sig::get_bit(&sig_calc, calc_precision - 1));
-
- // The error from the true value, in half-ulps, on multiplying two
- // floating point numbers, which differ from the value they
- // approximate by at most half_ulp_err1 and half_ulp_err2 half-ulps, is strictly less
- // than the returned value.
- //
- // See "How to Read Floating Point Numbers Accurately" by William D Clinger.
- assert!(half_ulp_err1 < 2 || half_ulp_err2 < 2 || (half_ulp_err1 + half_ulp_err2 < 8));
-
- let inexact = (calc_loss != Loss::ExactlyZero) as Limb;
- let half_ulp_err = if half_ulp_err1 + half_ulp_err2 == 0 {
- inexact * 2 // <= inexact half-ulps.
- } else {
- inexact + 2 * (half_ulp_err1 + half_ulp_err2)
- };
-
- let ulps_from_boundary = {
- let bits = calc_precision - used_bits - 1;
-
- let i = bits / LIMB_BITS;
- let limb = sig_calc[i] & (!0 >> (LIMB_BITS - 1 - bits % LIMB_BITS));
- let boundary = match round {
- Round::NearestTiesToEven | Round::NearestTiesToAway => 1 << (bits % LIMB_BITS),
- _ => 0,
- };
- if i == 0 {
- let delta = limb.wrapping_sub(boundary);
- cmp::min(delta, delta.wrapping_neg())
- } else if limb == boundary {
- if !sig::is_all_zeros(&sig_calc[1..i]) {
- !0 // A lot.
- } else {
- sig_calc[0]
- }
- } else if limb == boundary.wrapping_sub(1) {
- if sig_calc[1..i].iter().any(|&x| x.wrapping_neg() != 1) {
- !0 // A lot.
- } else {
- sig_calc[0].wrapping_neg()
- }
- } else {
- !0 // A lot.
- }
- };
-
- // Are we guaranteed to round correctly if we truncate?
- if ulps_from_boundary.saturating_mul(2) >= half_ulp_err {
- let mut r = IeeeFloat {
- sig: [0],
- exp,
- category: Category::Normal,
- sign: false,
- marker: PhantomData,
- };
- sig::extract(&mut r.sig, &sig_calc, used_bits, calc_precision - used_bits);
- // If we extracted less bits above we must adjust our exponent
- // to compensate for the implicit right shift.
- r.exp += (S::PRECISION - used_bits) as ExpInt;
- let loss = Loss::through_truncation(&sig_calc, truncated_bits);
- return Ok(r.normalize(round, loss));
- }
- }
- }
-}
-
-impl Loss {
- /// Combine the effect of two lost fractions.
- fn combine(self, less_significant: Loss) -> Loss {
- let mut more_significant = self;
- if less_significant != Loss::ExactlyZero {
- if more_significant == Loss::ExactlyZero {
- more_significant = Loss::LessThanHalf;
- } else if more_significant == Loss::ExactlyHalf {
- more_significant = Loss::MoreThanHalf;
- }
- }
-
- more_significant
- }
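// Illustratively, the logic above means: any non-zero less-significant loss bumps
// an ExactlyZero more-significant loss up to LessThanHalf, and an ExactlyHalf one
// up to MoreThanHalf; in every other case the more-significant loss already
// dominates and is returned unchanged.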
-
- /// Returns the fraction lost were a bignum truncated losing the least
- /// significant `bits` bits.
- fn through_truncation(limbs: &[Limb], bits: usize) -> Loss {
- if bits == 0 {
- return Loss::ExactlyZero;
- }
-
- let half_bit = bits - 1;
- let half_limb = half_bit / LIMB_BITS;
- let (half_limb, rest) = if half_limb < limbs.len() {
- (limbs[half_limb], &limbs[..half_limb])
- } else {
- (0, limbs)
- };
- let half = 1 << (half_bit % LIMB_BITS);
- let has_half = half_limb & half != 0;
- let has_rest = half_limb & (half - 1) != 0 || !sig::is_all_zeros(rest);
-
- match (has_half, has_rest) {
- (false, false) => Loss::ExactlyZero,
- (false, true) => Loss::LessThanHalf,
- (true, false) => Loss::ExactlyHalf,
- (true, true) => Loss::MoreThanHalf,
- }
- }
-}
-
-/// Implementation details of IeeeFloat significands, such as big integer arithmetic.
-/// As a rule of thumb, no functions in this module should dynamically allocate.
-mod sig {
- use super::{limbs_for_bits, ExpInt, Limb, Loss, LIMB_BITS};
- use core::cmp::Ordering;
- use core::iter;
- use core::mem;
-
- pub(super) fn is_all_zeros(limbs: &[Limb]) -> bool {
- limbs.iter().all(|&l| l == 0)
- }
-
- /// One-based (not zero-based) LSB. That is, returns 0 for a zeroed significand.
- pub(super) fn olsb(limbs: &[Limb]) -> usize {
- limbs
- .iter()
- .enumerate()
- .find(|(_, &limb)| limb != 0)
- .map_or(0, |(i, limb)| i * LIMB_BITS + limb.trailing_zeros() as usize + 1)
- }
-
- /// One-based (not zero-based) MSB. That is, returns 0 for a zeroed significand.
- pub(super) fn omsb(limbs: &[Limb]) -> usize {
- limbs
- .iter()
- .enumerate()
- .rfind(|(_, &limb)| limb != 0)
- .map_or(0, |(i, limb)| (i + 1) * LIMB_BITS - limb.leading_zeros() as usize)
- }
-
- /// Comparison (unsigned) of two significands.
- pub(super) fn cmp(a: &[Limb], b: &[Limb]) -> Ordering {
- assert_eq!(a.len(), b.len());
- for (a, b) in a.iter().zip(b).rev() {
- match a.cmp(b) {
- Ordering::Equal => {}
- o => return o,
- }
- }
-
- Ordering::Equal
- }
-
- /// Extracts the given bit.
- pub(super) fn get_bit(limbs: &[Limb], bit: usize) -> bool {
- limbs[bit / LIMB_BITS] & (1 << (bit % LIMB_BITS)) != 0
- }
-
- /// Sets the given bit.
- pub(super) fn set_bit(limbs: &mut [Limb], bit: usize) {
- limbs[bit / LIMB_BITS] |= 1 << (bit % LIMB_BITS);
- }
-
- /// Clear the given bit.
- pub(super) fn clear_bit(limbs: &mut [Limb], bit: usize) {
- limbs[bit / LIMB_BITS] &= !(1 << (bit % LIMB_BITS));
- }
-
- /// Shifts `dst` left by `bits` bits, subtracting `bits` from its exponent.
- pub(super) fn shift_left(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) {
- if bits > 0 {
- // Our exponent should not underflow.
- *exp = exp.checked_sub(bits as ExpInt).unwrap();
-
- // Jump is the inter-limb jump; shift is the intra-limb shift.
- let jump = bits / LIMB_BITS;
- let shift = bits % LIMB_BITS;
-
- for i in (0..dst.len()).rev() {
- let mut limb;
-
- if i < jump {
- limb = 0;
- } else {
- // dst[i] comes from the two limbs src[i - jump] and, if we have
- // an intra-limb shift, src[i - jump - 1].
- limb = dst[i - jump];
- if shift > 0 {
- limb <<= shift;
- if i > jump {
- limb |= dst[i - jump - 1] >> (LIMB_BITS - shift);
- }
- }
- }
-
- dst[i] = limb;
- }
- }
- }
-
- /// Shifts `dst` right by `bits` bits, noting the lost fraction.
- pub(super) fn shift_right(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) -> Loss {
- let loss = Loss::through_truncation(dst, bits);
-
- if bits > 0 {
- // Our exponent should not overflow.
- *exp = exp.checked_add(bits as ExpInt).unwrap();
-
- // Jump is the inter-limb jump; shift is the intra-limb shift.
- let jump = bits / LIMB_BITS;
- let shift = bits % LIMB_BITS;
-
- // Perform the shift. This leaves the most significant `bits` bits
- // of the result at zero.
- for i in 0..dst.len() {
- let mut limb;
-
- if i + jump >= dst.len() {
- limb = 0;
- } else {
- limb = dst[i + jump];
- if shift > 0 {
- limb >>= shift;
- if i + jump + 1 < dst.len() {
- limb |= dst[i + jump + 1] << (LIMB_BITS - shift);
- }
- }
- }
-
- dst[i] = limb;
- }
- }
-
- loss
- }
-
- /// Copies the bit vector of width `src_bits` from `src`, starting at bit `src_lsb`,
- /// to `dst`, such that the bit `src_lsb` becomes the least significant bit of `dst`.
- /// All high bits above `src_bits` in `dst` are zero-filled.
- pub(super) fn extract(dst: &mut [Limb], src: &[Limb], src_bits: usize, src_lsb: usize) {
- if src_bits == 0 {
- return;
- }
-
- let dst_limbs = limbs_for_bits(src_bits);
- assert!(dst_limbs <= dst.len());
-
- let src = &src[src_lsb / LIMB_BITS..];
- dst[..dst_limbs].copy_from_slice(&src[..dst_limbs]);
-
- let shift = src_lsb % LIMB_BITS;
- let _: Loss = shift_right(&mut dst[..dst_limbs], &mut 0, shift);
-
- // We now have (dst_limbs * LIMB_BITS - shift) bits from `src`
- // in `dst`. If this is less than src_bits, append the rest, else
- // clear the high bits.
- let n = dst_limbs * LIMB_BITS - shift;
- if n < src_bits {
- let mask = (1 << (src_bits - n)) - 1;
- dst[dst_limbs - 1] |= (src[dst_limbs] & mask) << (n % LIMB_BITS);
- } else if n > src_bits && src_bits % LIMB_BITS > 0 {
- dst[dst_limbs - 1] &= (1 << (src_bits % LIMB_BITS)) - 1;
- }
-
- // Clear high limbs.
- for x in &mut dst[dst_limbs..] {
- *x = 0;
- }
- }
-
- /// We want the most significant `precision` bits of `src`. There may not
- /// be that many; extract what we can.
- pub(super) fn from_limbs(dst: &mut [Limb], src: &[Limb], precision: usize) -> (Loss, ExpInt) {
- let omsb = omsb(src);
-
- if precision <= omsb {
- extract(dst, src, precision, omsb - precision);
- (Loss::through_truncation(src, omsb - precision), omsb as ExpInt - 1)
- } else {
- extract(dst, src, omsb, 0);
- (Loss::ExactlyZero, precision as ExpInt - 1)
- }
- }
-
- /// For every consecutive chunk of `bits` bits from `limbs`,
- /// going from most significant to the least significant bits,
- /// call `f` to transform those bits and store the result back.
- pub(super) fn each_chunk<F: FnMut(Limb) -> Limb>(limbs: &mut [Limb], bits: usize, mut f: F) {
- assert_eq!(LIMB_BITS % bits, 0);
- for limb in limbs.iter_mut().rev() {
- let mut r = 0;
- for i in (0..LIMB_BITS / bits).rev() {
- r |= f((*limb >> (i * bits)) & ((1 << bits) - 1)) << (i * bits);
- }
- *limb = r;
- }
- }
-
- /// Increment in-place, return the carry flag.
- pub(super) fn increment(dst: &mut [Limb]) -> Limb {
- for x in dst {
- *x = x.wrapping_add(1);
- if *x != 0 {
- return 0;
- }
- }
-
- 1
- }
-
- /// Decrement in-place, return the borrow flag.
- pub(super) fn decrement(dst: &mut [Limb]) -> Limb {
- for x in dst {
- *x = x.wrapping_sub(1);
- if *x != !0 {
- return 0;
- }
- }
-
- 1
- }
-
- /// `a += b + c` where `c` is zero or one. Returns the carry flag.
- pub(super) fn add(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb {
- assert!(c <= 1);
-
- for (a, &b) in iter::zip(a, b) {
- let (r, overflow) = a.overflowing_add(b);
- let (r, overflow2) = r.overflowing_add(c);
- *a = r;
- c = (overflow | overflow2) as Limb;
- }
-
- c
- }
-
- /// `a -= b + c` where `c` is zero or one. Returns the borrow flag.
- pub(super) fn sub(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb {
- assert!(c <= 1);
-
- for (a, &b) in iter::zip(a, b) {
- let (r, overflow) = a.overflowing_sub(b);
- let (r, overflow2) = r.overflowing_sub(c);
- *a = r;
- c = (overflow | overflow2) as Limb;
- }
-
- c
- }
-
- /// `a += b` or `a -= b`. Does not preserve `b`.
- pub(super) fn add_or_sub(
- a_sig: &mut [Limb],
- a_exp: &mut ExpInt,
- a_sign: &mut bool,
- b_sig: &mut [Limb],
- b_exp: ExpInt,
- b_sign: bool,
- ) -> Loss {
- // Are we bigger exponent-wise than the RHS?
- let bits = *a_exp - b_exp;
-
- // Determine if the operation on the absolute values is effectively
- // an addition or subtraction.
- // Subtraction is more subtle than one might naively expect.
- if *a_sign ^ b_sign {
- let (reverse, loss);
-
- if bits == 0 {
- reverse = cmp(a_sig, b_sig) == Ordering::Less;
- loss = Loss::ExactlyZero;
- } else if bits > 0 {
- loss = shift_right(b_sig, &mut 0, (bits - 1) as usize);
- shift_left(a_sig, a_exp, 1);
- reverse = false;
- } else {
- loss = shift_right(a_sig, a_exp, (-bits - 1) as usize);
- shift_left(b_sig, &mut 0, 1);
- reverse = true;
- }
-
- let borrow = (loss != Loss::ExactlyZero) as Limb;
- if reverse {
- // The code above is intended to ensure that no borrow is necessary.
- assert_eq!(sub(b_sig, a_sig, borrow), 0);
- a_sig.copy_from_slice(b_sig);
- *a_sign = !*a_sign;
- } else {
- // The code above is intended to ensure that no borrow is necessary.
- assert_eq!(sub(a_sig, b_sig, borrow), 0);
- }
-
- // Invert the lost fraction - it was on the RHS and subtracted.
- match loss {
- Loss::LessThanHalf => Loss::MoreThanHalf,
- Loss::MoreThanHalf => Loss::LessThanHalf,
- _ => loss,
- }
- } else {
- let loss = if bits > 0 {
- shift_right(b_sig, &mut 0, bits as usize)
- } else {
- shift_right(a_sig, a_exp, -bits as usize)
- };
- // We have a guard bit; generating a carry cannot happen.
- assert_eq!(add(a_sig, b_sig, 0), 0);
- loss
- }
- }
-
- /// `[low, high] = a * b`.
- ///
- /// This cannot overflow, because
- ///
- /// `(n - 1) * (n - 1) + 2 * (n - 1) == (n - 1) * (n + 1)`
- ///
- /// which is less than n<sup>2</sup>.
- pub(super) fn widening_mul(a: Limb, b: Limb) -> [Limb; 2] {
- let mut wide = [0, 0];
-
- if a == 0 || b == 0 {
- return wide;
- }
-
- const HALF_BITS: usize = LIMB_BITS / 2;
-
- let select = |limb, i| (limb >> (i * HALF_BITS)) & ((1 << HALF_BITS) - 1);
- for i in 0..2 {
- for j in 0..2 {
- let mut x = [select(a, i) * select(b, j), 0];
- shift_left(&mut x, &mut 0, (i + j) * HALF_BITS);
- assert_eq!(add(&mut wide, &x, 0), 0);
- }
- }
-
- wide
- }
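// Spelling out the identity quoted in the doc comment (illustrative): with
// n = 2^HALF_BITS, a half-limb partial product is at most (n - 1)^2, and adding
// a carry and a previously accumulated half-limb (each at most n - 1) gives at
// most (n - 1)^2 + 2*(n - 1) = (n - 1)*(n + 1) = n^2 - 1, which still fits in a
// single Limb -- hence the expectation that `add` above never reports a carry.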
-
- /// `dst = a * b` (for normal `a` and `b`). Returns the lost fraction.
- pub(super) fn mul<'a>(
- dst: &mut [Limb],
- exp: &mut ExpInt,
- mut a: &'a [Limb],
- mut b: &'a [Limb],
- precision: usize,
- ) -> Loss {
- // Put the narrower number in `a`, so the outer loop below iterates fewer times.
- if a.len() > b.len() {
- mem::swap(&mut a, &mut b);
- }
-
- for x in &mut dst[..b.len()] {
- *x = 0;
- }
-
- for i in 0..a.len() {
- let mut carry = 0;
- for j in 0..b.len() {
- let [low, mut high] = widening_mul(a[i], b[j]);
-
- // Now add carry.
- let (low, overflow) = low.overflowing_add(carry);
- high += overflow as Limb;
-
- // And now `dst[i + j]`, and store the new low part there.
- let (low, overflow) = low.overflowing_add(dst[i + j]);
- high += overflow as Limb;
-
- dst[i + j] = low;
- carry = high;
- }
- dst[i + b.len()] = carry;
- }
-
- // Assume the operands involved in the multiplication are single-precision
- // FP, and the two multiplicands are:
- // a = a23 . a22 ... a0 * 2^e1
- // b = b23 . b22 ... b0 * 2^e2
- // the result of multiplication is:
- // dst = c48 c47 c46 . c45 ... c0 * 2^(e1+e2)
- // Note that there are three significant bits at the left-hand side of the
- // radix point: two for the multiplication, and an overflow bit for the
- // addition (that will always be zero at this point). Move the radix point
- // toward left by two bits, and adjust exponent accordingly.
- *exp += 2;
-
- // Convert the result having "2 * precision" significant-bits back to the one
- // having "precision" significant-bits. First, move the radix point from
- // position "2*precision - 1" to "precision - 1". The exponent needs to be
- // adjusted by "2*precision - 1" - "precision - 1" = "precision".
- *exp -= precision as ExpInt + 1;
-
- // In case MSB resides at the left-hand side of radix point, shift the
- // mantissa right by some amount to make sure the MSB reside right before
- // the radix point (i.e., "MSB . rest-significant-bits").
- //
- // Note that the result is not normalized when "omsb < precision". So, the
- // caller needs to call IeeeFloat::normalize() if normalized value is
- // expected.
- let omsb = omsb(dst);
- if omsb <= precision { Loss::ExactlyZero } else { shift_right(dst, exp, omsb - precision) }
- }
-
- /// `quotient = dividend / divisor`. Returns the lost fraction.
- /// Does not preserve `dividend` or `divisor`.
- pub(super) fn div(
- quotient: &mut [Limb],
- exp: &mut ExpInt,
- dividend: &mut [Limb],
- divisor: &mut [Limb],
- precision: usize,
- ) -> Loss {
- // Normalize the divisor.
- let bits = precision - omsb(divisor);
- shift_left(divisor, &mut 0, bits);
- *exp += bits as ExpInt;
-
- // Normalize the dividend.
- let bits = precision - omsb(dividend);
- shift_left(dividend, exp, bits);
-
- // Division by 1.
- let olsb_divisor = olsb(divisor);
- if olsb_divisor == precision {
- quotient.copy_from_slice(dividend);
- return Loss::ExactlyZero;
- }
-
- // Ensure the dividend >= divisor initially for the loop below.
- // Incidentally, this means that the division loop below is
- // guaranteed to set the integer bit to one.
- if cmp(dividend, divisor) == Ordering::Less {
- shift_left(dividend, exp, 1);
- assert_ne!(cmp(dividend, divisor), Ordering::Less)
- }
-
- // Helper for figuring out the lost fraction.
- let lost_fraction = |dividend: &[Limb], divisor: &[Limb]| match cmp(dividend, divisor) {
- Ordering::Greater => Loss::MoreThanHalf,
- Ordering::Equal => Loss::ExactlyHalf,
- Ordering::Less => {
- if is_all_zeros(dividend) {
- Loss::ExactlyZero
- } else {
- Loss::LessThanHalf
- }
- }
- };
-
- // Try to perform a (much faster) short division for small divisors.
- let divisor_bits = precision - (olsb_divisor - 1);
- macro_rules! try_short_div {
- ($W:ty, $H:ty, $half:expr) => {
- if divisor_bits * 2 <= $half {
- // Extract the small divisor.
- let _: Loss = shift_right(divisor, &mut 0, olsb_divisor - 1);
- let divisor = divisor[0] as $H as $W;
-
- // Shift the dividend to produce a quotient with the unit bit set.
- let top_limb = *dividend.last().unwrap();
- let mut rem = (top_limb >> (LIMB_BITS - (divisor_bits - 1))) as $H;
- shift_left(dividend, &mut 0, divisor_bits - 1);
-
- // Apply short division in place on $H (of $half bits) chunks.
- each_chunk(dividend, $half, |chunk| {
- let chunk = chunk as $H;
- let combined = ((rem as $W) << $half) | (chunk as $W);
- rem = (combined % divisor) as $H;
- (combined / divisor) as $H as Limb
- });
- quotient.copy_from_slice(dividend);
-
- return lost_fraction(&[(rem as Limb) << 1], &[divisor as Limb]);
- }
- };
- }
-
- try_short_div!(u32, u16, 16);
- try_short_div!(u64, u32, 32);
- try_short_div!(u128, u64, 64);
-
- // Zero the quotient before setting bits in it.
- for x in &mut quotient[..limbs_for_bits(precision)] {
- *x = 0;
- }
-
- // Long division.
- for bit in (0..precision).rev() {
- if cmp(dividend, divisor) != Ordering::Less {
- sub(dividend, divisor, 0);
- set_bit(quotient, bit);
- }
- shift_left(dividend, &mut 0, 1);
- }
-
- lost_fraction(dividend, divisor)
- }
-}
diff --git a/compiler/rustc_apfloat/src/lib.rs b/compiler/rustc_apfloat/src/lib.rs
deleted file mode 100644
index dde368e7b..000000000
--- a/compiler/rustc_apfloat/src/lib.rs
+++ /dev/null
@@ -1,695 +0,0 @@
-//! Port of LLVM's APFloat software floating-point implementation from the
-//! following C++ sources (please update commit hash when backporting):
-//! <https://github.com/llvm-mirror/llvm/tree/23efab2bbd424ed13495a420ad8641cb2c6c28f9>
-//!
-//! * `include/llvm/ADT/APFloat.h` -> `Float` and `FloatConvert` traits
-//! * `lib/Support/APFloat.cpp` -> `ieee` and `ppc` modules
-//! * `unittests/ADT/APFloatTest.cpp` -> `tests` directory
-//!
-//! The port contains no unsafe code, global state, or side-effects in general,
-//! and the only allocations are in the conversion to/from decimal strings.
-//!
-//! Most of the API and the testcases are intact in some form or another,
-//! with some ergonomic changes, such as idiomatic short names, returning
-//! new values instead of mutating the receiver, and having separate method
-//! variants that take a non-default rounding mode (with the suffix `_r`).
-//! Comments have been preserved where possible, only slightly adapted.
-//!
-//! Instead of keeping a pointer to a configuration struct and inspecting it
-//! dynamically on every operation, types (e.g., `ieee::Double`), traits
-//! (e.g., `ieee::Semantics`) and associated constants are employed for
-//! increased type safety and performance.
-//!
-//! On-heap bigints are replaced everywhere (except in decimal conversion),
-//! with short arrays of `type Limb = u128` elements (instead of `u64`).
-//! This allows fitting the largest supported significands in one integer
-//! (`ieee::Quad` and `ppc::Fallback` use slightly less than 128 bits).
-//! All of the functions in the `ieee::sig` module operate on slices.
-//!
-//! # Note
-//!
-//! This API is completely unstable and subject to change.
-
-#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
-#![no_std]
-#![forbid(unsafe_code)]
-#![deny(rustc::untranslatable_diagnostic)]
-#![deny(rustc::diagnostic_outside_of_impl)]
-
-#[macro_use]
-extern crate alloc;
-
-use core::cmp::Ordering;
-use core::fmt;
-use core::ops::{Add, Div, Mul, Neg, Rem, Sub};
-use core::ops::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign};
-use core::str::FromStr;
-
-bitflags::bitflags! {
- /// IEEE-754R 7: Default exception handling.
- ///
- /// UNDERFLOW or OVERFLOW are always returned or-ed with INEXACT.
- #[must_use]
- pub struct Status: u8 {
- const OK = 0x00;
- const INVALID_OP = 0x01;
- const DIV_BY_ZERO = 0x02;
- const OVERFLOW = 0x04;
- const UNDERFLOW = 0x08;
- const INEXACT = 0x10;
- }
-}
-
-#[must_use]
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
-pub struct StatusAnd<T> {
- pub status: Status,
- pub value: T,
-}
-
-impl Status {
- pub fn and<T>(self, value: T) -> StatusAnd<T> {
- StatusAnd { status: self, value }
- }
-}
-
-impl<T> StatusAnd<T> {
- pub fn map<F: FnOnce(T) -> U, U>(self, f: F) -> StatusAnd<U> {
- StatusAnd { status: self.status, value: f(self.value) }
- }
-}
-
-#[macro_export]
-macro_rules! unpack {
- ($status:ident|=, $e:expr) => {
- match $e {
- $crate::StatusAnd { status, value } => {
- $status |= status;
- value
- }
- }
- };
- ($status:ident=, $e:expr) => {
- match $e {
- $crate::StatusAnd { status, value } => {
- $status = status;
- value
- }
- }
- };
-}
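A minimal usage sketch for `unpack!`, mirroring how it is used throughout this crate: `a` and `b` stand for values of some concrete `Float` type (e.g. `ieee::Double`), and the `|=` form accumulates statuses across several operations.

let mut status = Status::OK;
let sum = unpack!(status|=, a + b);    // `a + b` yields StatusAnd<_>; `sum` is its value
let quot = unpack!(status|=, sum / b); // the two statuses are or-ed into `status`
if status.intersects(Status::INEXACT) {
    // at least one of the operations rounded
}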
-
-/// Category of internally-represented number.
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum Category {
- Infinity,
- NaN,
- Normal,
- Zero,
-}
-
-/// IEEE-754R 4.3: Rounding-direction attributes.
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum Round {
- NearestTiesToEven,
- TowardPositive,
- TowardNegative,
- TowardZero,
- NearestTiesToAway,
-}
-
-impl Neg for Round {
- type Output = Round;
- fn neg(self) -> Round {
- match self {
- Round::TowardPositive => Round::TowardNegative,
- Round::TowardNegative => Round::TowardPositive,
- Round::NearestTiesToEven | Round::TowardZero | Round::NearestTiesToAway => self,
- }
- }
-}
-
-/// A signed type to represent a floating point number's unbiased exponent.
-pub type ExpInt = i16;
-
-// \c ilogb error results.
-pub const IEK_INF: ExpInt = ExpInt::MAX;
-pub const IEK_NAN: ExpInt = ExpInt::MIN;
-pub const IEK_ZERO: ExpInt = ExpInt::MIN + 1;
-
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub struct ParseError(pub &'static str);
-
-/// A self-contained host- and target-independent arbitrary-precision
-/// floating-point software implementation.
-///
-/// `apfloat` uses significand bignum integer arithmetic as provided by functions
-/// in the `ieee::sig` module.
-///
-/// Written for clarity rather than speed, in particular with a view to use in
-/// the front-end of a cross compiler so that target arithmetic can be correctly
-/// performed on the host. Performance should nonetheless be reasonable,
-/// particularly for its intended use. It may be useful as a base
-/// implementation for a run-time library during development of a faster
-/// target-specific one.
-///
-/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all
-/// implemented operations. Currently implemented operations are add, subtract,
-/// multiply, divide, fused-multiply-add, conversion-to-float,
-/// conversion-to-integer and conversion-from-integer. New rounding modes
-/// (e.g., away from zero) can be added with three or four lines of code.
-///
-/// Four formats are built-in: IEEE single precision, double precision,
-/// quadruple precision, and x87 80-bit extended double (when operating with
-/// full extended precision). Adding a new format that obeys IEEE semantics
-/// only requires adding two lines of code: a declaration and definition of the
-/// format.
-///
-/// All operations return the status of that operation as an exception bit-mask,
-/// so multiple operations can be done consecutively with their results or-ed
-/// together. The returned status can be useful for compiler diagnostics; e.g.,
-/// inexact, underflow and overflow can be easily diagnosed on constant folding,
-/// and compiler optimizers can determine what exceptions would be raised by
-/// folding operations and optimize, or perhaps not optimize, accordingly.
-///
-/// At present, underflow tininess is detected after rounding; it should be
-/// straightforward to add support for the before-rounding case too.
-///
-/// The library reads hexadecimal floating point numbers as per C99, and
-/// correctly rounds if necessary according to the specified rounding mode.
-/// Syntax is required to have been validated by the caller.
-///
-/// It also reads decimal floating point numbers and correctly rounds according
-/// to the specified rounding mode.
-///
-/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit
-/// signed exponent, and the significand as an array of integer limbs. After
-/// normalization of a number of precision P the exponent is within the range of
-/// the format, and if the number is not denormal the P-th bit of the
-/// significand is set as an explicit integer bit. For denormals the most
-/// significant bit is shifted right so that the exponent is maintained at the
-/// format's minimum, so that the smallest denormal has just the least
-/// significant bit of the significand set. The sign of zeros and infinities
-/// is significant; the exponent and significand of such numbers are not stored,
-/// but have a known implicit (deterministic) value: 0 for the significands, 0
-/// for zero exponent, all 1 bits for infinity exponent. For NaNs the sign and
-/// significand are deterministic, although not really meaningful, and preserved
-/// in non-conversion operations. The exponent is implicitly all 1 bits.
-///
-/// `apfloat` does not provide any exception handling beyond default exception
-/// handling. We represent Signaling NaNs via IEEE-754R 2008 6.2.1 should clause
-/// by encoding Signaling NaNs with the first bit of their trailing significand
-/// as 0.
-///
-/// Future work
-/// ===========
-///
-/// Some features that may or may not be worth adding:
-///
-/// Optional ability to detect underflow tininess before rounding.
-///
-/// New formats: x87 in single and double precision mode (IEEE apart from
-/// extended exponent range) (hard).
-///
-/// New operations: sqrt, nexttoward.
-///
-pub trait Float:
- Copy
- + Default
- + FromStr<Err = ParseError>
- + PartialOrd
- + fmt::Display
- + Neg<Output = Self>
- + AddAssign
- + SubAssign
- + MulAssign
- + DivAssign
- + RemAssign
- + Add<Output = StatusAnd<Self>>
- + Sub<Output = StatusAnd<Self>>
- + Mul<Output = StatusAnd<Self>>
- + Div<Output = StatusAnd<Self>>
- + Rem<Output = StatusAnd<Self>>
-{
- /// Total number of bits in the in-memory format.
- const BITS: usize;
-
- /// Number of bits in the significand. This includes the integer bit.
- const PRECISION: usize;
-
- /// The largest E such that 2<sup>E</sup> is representable; this matches the
- /// definition of IEEE 754.
- const MAX_EXP: ExpInt;
-
- /// The smallest E such that 2<sup>E</sup> is a normalized number; this
- /// matches the definition of IEEE 754.
- const MIN_EXP: ExpInt;
-
- /// Positive Zero.
- const ZERO: Self;
-
- /// Positive Infinity.
- const INFINITY: Self;
-
- /// NaN (Not a Number).
- // FIXME(eddyb) provide a default when qnan becomes const fn.
- const NAN: Self;
-
- /// Factory for QNaN values.
- // FIXME(eddyb) should be const fn.
- fn qnan(payload: Option<u128>) -> Self;
-
- /// Factory for SNaN values.
- // FIXME(eddyb) should be const fn.
- fn snan(payload: Option<u128>) -> Self;
-
- /// Largest finite number.
- // FIXME(eddyb) should be const (but FloatPair::largest is nontrivial).
- fn largest() -> Self;
-
- /// Smallest (by magnitude) finite number.
- /// Might be denormalized, which implies a relative loss of precision.
- const SMALLEST: Self;
-
- /// Smallest (by magnitude) normalized finite number.
- // FIXME(eddyb) should be const (but FloatPair::smallest_normalized is nontrivial).
- fn smallest_normalized() -> Self;
-
- // Arithmetic
-
- fn add_r(self, rhs: Self, round: Round) -> StatusAnd<Self>;
- fn sub_r(self, rhs: Self, round: Round) -> StatusAnd<Self> {
- self.add_r(-rhs, round)
- }
- fn mul_r(self, rhs: Self, round: Round) -> StatusAnd<Self>;
- fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self>;
- fn mul_add(self, multiplicand: Self, addend: Self) -> StatusAnd<Self> {
- self.mul_add_r(multiplicand, addend, Round::NearestTiesToEven)
- }
- fn div_r(self, rhs: Self, round: Round) -> StatusAnd<Self>;
- /// IEEE remainder.
- // This is not currently correct in all cases.
- fn ieee_rem(self, rhs: Self) -> StatusAnd<Self> {
- let mut v = self;
-
- let status;
- v = unpack!(status=, v / rhs);
- if status == Status::DIV_BY_ZERO {
- return status.and(self);
- }
-
- assert!(Self::PRECISION < 128);
-
- let status;
- let x = unpack!(status=, v.to_i128_r(128, Round::NearestTiesToEven, &mut false));
- if status == Status::INVALID_OP {
- return status.and(self);
- }
-
- let status;
- let mut v = unpack!(status=, Self::from_i128(x));
- assert_eq!(status, Status::OK); // should always work
-
- let status;
- v = unpack!(status=, v * rhs);
- assert_eq!(status - Status::INEXACT, Status::OK); // should not overflow or underflow
-
- let status;
- v = unpack!(status=, self - v);
- assert_eq!(status - Status::INEXACT, Status::OK); // likewise
-
- if v.is_zero() {
- status.and(v.copy_sign(self)) // IEEE754 requires this
- } else {
- status.and(v)
- }
- }
- /// C fmod, or llvm frem.
- fn c_fmod(self, rhs: Self) -> StatusAnd<Self>;
- fn round_to_integral(self, round: Round) -> StatusAnd<Self>;
-
- /// IEEE-754R 2008 5.3.1: nextUp.
- fn next_up(self) -> StatusAnd<Self>;
-
- /// IEEE-754R 2008 5.3.1: nextDown.
- ///
- /// *NOTE* since nextDown(x) = -nextUp(-x), we only implement nextUp with
- /// appropriate sign switching before/after the computation.
- fn next_down(self) -> StatusAnd<Self> {
- (-self).next_up().map(|r| -r)
- }
-
- fn abs(self) -> Self {
- if self.is_negative() { -self } else { self }
- }
- fn copy_sign(self, rhs: Self) -> Self {
- if self.is_negative() != rhs.is_negative() { -self } else { self }
- }
-
- // Conversions
- fn from_bits(input: u128) -> Self;
- fn from_i128_r(input: i128, round: Round) -> StatusAnd<Self> {
- if input < 0 {
- Self::from_u128_r(input.wrapping_neg() as u128, -round).map(|r| -r)
- } else {
- Self::from_u128_r(input as u128, round)
- }
- }
- fn from_i128(input: i128) -> StatusAnd<Self> {
- Self::from_i128_r(input, Round::NearestTiesToEven)
- }
- fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self>;
- fn from_u128(input: u128) -> StatusAnd<Self> {
- Self::from_u128_r(input, Round::NearestTiesToEven)
- }
- fn from_str_r(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError>;
- fn to_bits(self) -> u128;
-
- /// Converts a floating point number to an integer according to the
- /// rounding mode. In case of an invalid operation exception,
- /// deterministic values are returned, namely zero for NaNs and the
- /// minimal or maximal value respectively for underflow or overflow.
- /// If the rounded value is in range but the floating point number is
- /// not the exact integer, the C standard doesn't require an inexact
- /// exception to be raised. IEEE-854 does require it so we do that.
- ///
- /// Note that for conversions to integer type the C standard requires
- /// round-to-zero to always be used.
- ///
- /// The `is_exact` output tells whether the result is exact, in the sense
- /// that converting it back to the original floating point type produces
- /// the original value. This is almost equivalent to the returned status being `Status::OK`,
- /// except for negative zeroes.
- fn to_i128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<i128> {
- let status;
- if self.is_negative() {
- if self.is_zero() {
- // Negative zero can't be represented as an int.
- *is_exact = false;
- }
- let r = unpack!(status=, (-self).to_u128_r(width, -round, is_exact));
-
- // Check for values that don't fit in the signed integer.
- if r > (1 << (width - 1)) {
- // Return the most negative integer for the given width.
- *is_exact = false;
- Status::INVALID_OP.and(-1 << (width - 1))
- } else {
- status.and(r.wrapping_neg() as i128)
- }
- } else {
- // Positive case is simpler, can pretend it's a smaller unsigned
- // integer, and `to_u128` will take care of all the edge cases.
- self.to_u128_r(width - 1, round, is_exact).map(|r| r as i128)
- }
- }
- fn to_i128(self, width: usize) -> StatusAnd<i128> {
- self.to_i128_r(width, Round::TowardZero, &mut true)
- }
- fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128>;
- fn to_u128(self, width: usize) -> StatusAnd<u128> {
- self.to_u128_r(width, Round::TowardZero, &mut true)
- }
-
- fn cmp_abs_normal(self, rhs: Self) -> Ordering;
-
- /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
- fn bitwise_eq(self, rhs: Self) -> bool;
-
- // IEEE-754R 5.7.2 General operations.
-
- /// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
- /// both are not NaN. If either argument is a NaN, returns the other argument.
- fn min(self, other: Self) -> Self {
- if self.is_nan() {
- other
- } else if other.is_nan() {
- self
- } else if other.partial_cmp(&self) == Some(Ordering::Less) {
- other
- } else {
- self
- }
- }
-
- /// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
- /// both are not NaN. If either argument is a NaN, returns the other argument.
- fn max(self, other: Self) -> Self {
- if self.is_nan() {
- other
- } else if other.is_nan() {
- self
- } else if self.partial_cmp(&other) == Some(Ordering::Less) {
- other
- } else {
- self
- }
- }
-
- /// IEEE-754R isSignMinus: Returns whether the current value is
- /// negative.
- ///
- /// This applies to zeros and NaNs as well.
- fn is_negative(self) -> bool;
-
- /// IEEE-754R isNormal: Returns whether the current value is normal.
- ///
- /// This implies that the current value of the float is not zero, subnormal,
- /// infinite, or NaN following the definition of normality from IEEE-754R.
- fn is_normal(self) -> bool {
- !self.is_denormal() && self.is_finite_non_zero()
- }
-
- /// Returns `true` if the current value is zero, subnormal, or
- /// normal.
- ///
- /// This means that the value is not infinite or NaN.
- fn is_finite(self) -> bool {
- !self.is_nan() && !self.is_infinite()
- }
-
- /// Returns `true` if the float is plus or minus zero.
- fn is_zero(self) -> bool {
- self.category() == Category::Zero
- }
-
- /// IEEE-754R isSubnormal(): Returns whether the float is a
- /// denormal.
- fn is_denormal(self) -> bool;
-
- /// IEEE-754R isInfinite(): Returns whether the float is infinity.
- fn is_infinite(self) -> bool {
- self.category() == Category::Infinity
- }
-
- /// Returns `true` if the float is a quiet or signaling NaN.
- fn is_nan(self) -> bool {
- self.category() == Category::NaN
- }
-
- /// Returns `true` if the float is a signaling NaN.
- fn is_signaling(self) -> bool;
-
- // Simple Queries
-
- fn category(self) -> Category;
- fn is_non_zero(self) -> bool {
- !self.is_zero()
- }
- fn is_finite_non_zero(self) -> bool {
- self.is_finite() && !self.is_zero()
- }
- fn is_pos_zero(self) -> bool {
- self.is_zero() && !self.is_negative()
- }
- fn is_neg_zero(self) -> bool {
- self.is_zero() && self.is_negative()
- }
-
- /// Returns `true` if the number has the smallest possible non-zero
- /// magnitude in the current semantics.
- fn is_smallest(self) -> bool {
- Self::SMALLEST.copy_sign(self).bitwise_eq(self)
- }
-
- /// Returns `true` if the number has the largest possible finite
- /// magnitude in the current semantics.
- fn is_largest(self) -> bool {
- Self::largest().copy_sign(self).bitwise_eq(self)
- }
-
- /// Returns `true` if the number is an exact integer.
- fn is_integer(self) -> bool {
- // This could be made more efficient; I'm going for obviously correct.
- if !self.is_finite() {
- return false;
- }
- self.round_to_integral(Round::TowardZero).value.bitwise_eq(self)
- }
-
- /// If this value has an exact multiplicative inverse, return it.
- fn get_exact_inverse(self) -> Option<Self>;
-
- /// Returns the exponent of the internal representation of the Float.
- ///
- /// Because the radix of Float is 2, this is equivalent to floor(log2(x)).
- /// For special Float values, this returns special error codes:
- ///
- /// NaN -> \c IEK_NAN
- /// 0 -> \c IEK_ZERO
- /// Inf -> \c IEK_INF
- ///
- fn ilogb(self) -> ExpInt;
-
- /// Returns: self * 2<sup>exp</sup> for integral exponents.
- /// Equivalent to C standard library function `ldexp`.
- fn scalbn_r(self, exp: ExpInt, round: Round) -> Self;
- fn scalbn(self, exp: ExpInt) -> Self {
- self.scalbn_r(exp, Round::NearestTiesToEven)
- }
-
- /// Equivalent to C standard library function with the same name.
- ///
- /// While the C standard says exp is an unspecified value for infinity and nan,
- /// this returns INT_MAX for infinities, and INT_MIN for NaNs (see `ilogb`).
- fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self;
- fn frexp(self, exp: &mut ExpInt) -> Self {
- self.frexp_r(exp, Round::NearestTiesToEven)
- }
-}
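A rough sketch (a hypothetical helper, not part of this crate) of how the `Float` trait is meant to be consumed generically, summing a slice while accumulating the IEEE status flags:

fn sum_with_status<F: Float>(xs: &[F]) -> StatusAnd<F> {
    let mut status = Status::OK;
    let mut acc = F::ZERO;
    for &x in xs {
        acc = unpack!(status|=, acc.add_r(x, Round::NearestTiesToEven));
    }
    status.and(acc)
}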
-
-pub trait FloatConvert<T: Float>: Float {
- /// Converts a value of one floating point type to another.
- /// The return value corresponds to the IEEE754 exceptions. `loses_info`
- /// records whether the transformation lost information, i.e., whether
- /// converting the result back to the original type will produce the
- /// original value (this is almost the same as the returned status being `Status::OK`,
- /// but there are edge cases where this is not so).
- fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd<T>;
- fn convert(self, loses_info: &mut bool) -> StatusAnd<T> {
- self.convert_r(Round::NearestTiesToEven, loses_info)
- }
-}
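A hedged example of the conversion entry point above, assuming the `ieee::Single` and `ieee::Double` types provided by the `ieee` module declared below; it widens a small, exactly representable value:

let mut loses_info = false;
let narrow = ieee::Single::from_u128(42).value;
let wide: ieee::Double = narrow.convert(&mut loses_info).value;
// Widening an exactly representable value should be lossless, so `loses_info`
// is expected to remain false.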
-
-macro_rules! float_common_impls {
- ($ty:ident<$t:tt>) => {
- impl<$t> Default for $ty<$t>
- where
- Self: Float,
- {
- fn default() -> Self {
- Self::ZERO
- }
- }
-
- impl<$t> ::core::str::FromStr for $ty<$t>
- where
- Self: Float,
- {
- type Err = ParseError;
- fn from_str(s: &str) -> Result<Self, ParseError> {
- Self::from_str_r(s, Round::NearestTiesToEven).map(|x| x.value)
- }
- }
-
- // Rounding ties to the nearest even, by default.
-
- impl<$t> ::core::ops::Add for $ty<$t>
- where
- Self: Float,
- {
- type Output = StatusAnd<Self>;
- fn add(self, rhs: Self) -> StatusAnd<Self> {
- self.add_r(rhs, Round::NearestTiesToEven)
- }
- }
-
- impl<$t> ::core::ops::Sub for $ty<$t>
- where
- Self: Float,
- {
- type Output = StatusAnd<Self>;
- fn sub(self, rhs: Self) -> StatusAnd<Self> {
- self.sub_r(rhs, Round::NearestTiesToEven)
- }
- }
-
- impl<$t> ::core::ops::Mul for $ty<$t>
- where
- Self: Float,
- {
- type Output = StatusAnd<Self>;
- fn mul(self, rhs: Self) -> StatusAnd<Self> {
- self.mul_r(rhs, Round::NearestTiesToEven)
- }
- }
-
- impl<$t> ::core::ops::Div for $ty<$t>
- where
- Self: Float,
- {
- type Output = StatusAnd<Self>;
- fn div(self, rhs: Self) -> StatusAnd<Self> {
- self.div_r(rhs, Round::NearestTiesToEven)
- }
- }
-
- impl<$t> ::core::ops::Rem for $ty<$t>
- where
- Self: Float,
- {
- type Output = StatusAnd<Self>;
- fn rem(self, rhs: Self) -> StatusAnd<Self> {
- self.c_fmod(rhs)
- }
- }
-
- impl<$t> ::core::ops::AddAssign for $ty<$t>
- where
- Self: Float,
- {
- fn add_assign(&mut self, rhs: Self) {
- *self = (*self + rhs).value;
- }
- }
-
- impl<$t> ::core::ops::SubAssign for $ty<$t>
- where
- Self: Float,
- {
- fn sub_assign(&mut self, rhs: Self) {
- *self = (*self - rhs).value;
- }
- }
-
- impl<$t> ::core::ops::MulAssign for $ty<$t>
- where
- Self: Float,
- {
- fn mul_assign(&mut self, rhs: Self) {
- *self = (*self * rhs).value;
- }
- }
-
- impl<$t> ::core::ops::DivAssign for $ty<$t>
- where
- Self: Float,
- {
- fn div_assign(&mut self, rhs: Self) {
- *self = (*self / rhs).value;
- }
- }
-
- impl<$t> ::core::ops::RemAssign for $ty<$t>
- where
- Self: Float,
- {
- fn rem_assign(&mut self, rhs: Self) {
- *self = (*self % rhs).value;
- }
- }
- };
-}
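Note the asymmetry the macro establishes: the binary operators return `StatusAnd<Self>`, so exception flags are surfaced to the caller, while the `*Assign` forms keep only the value. A small sketch, using `ieee::Double` and values that are exactly representable:

let one = ieee::Double::from_u128(1).value;
let two = ieee::Double::from_u128(2).value;
let mut x = one;
x += two;                 // value only; the Status from the addition is discarded
let checked = one + two;  // StatusAnd<ieee::Double>: value plus Status
assert_eq!(checked.status, Status::OK);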
-
-pub mod ieee;
-pub mod ppc;
diff --git a/compiler/rustc_apfloat/src/ppc.rs b/compiler/rustc_apfloat/src/ppc.rs
deleted file mode 100644
index 65a0f6664..000000000
--- a/compiler/rustc_apfloat/src/ppc.rs
+++ /dev/null
@@ -1,434 +0,0 @@
-use crate::ieee;
-use crate::{Category, ExpInt, Float, FloatConvert, ParseError, Round, Status, StatusAnd};
-
-use core::cmp::Ordering;
-use core::fmt;
-use core::ops::Neg;
-
-#[must_use]
-#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)]
-pub struct DoubleFloat<F>(F, F);
-pub type DoubleDouble = DoubleFloat<ieee::Double>;
-
-// These are the legacy semantics for `Fallback`, an inaccurate implementation of
-// IBM double-double used when the accurate `DoubleDouble` code doesn't handle the
-// operation. It is equivalent to an IEEE number with 106 consecutive
-// bits of mantissa and 11 bits of exponent.
-//
-// It's not equivalent to IBM double-double. For example, a legitimate IBM
-// double-double, 1 + epsilon:
-//
-//   1 + epsilon = 1 + (1 >> 1076)
-//
-// is not representable with 106 consecutive bits of mantissa.
-//
-// Currently, these semantics are used in the following way:
-//
-// DoubleDouble -> (Double, Double) ->
-// DoubleDouble's Fallback -> IEEE operations
-//
-// FIXME: Implement all operations in DoubleDouble, and delete these
-// semantics.
-// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds.
-pub struct FallbackS<F>(#[allow(unused)] F);
-type Fallback<F> = ieee::IeeeFloat<FallbackS<F>>;
-impl<F: Float> ieee::Semantics for FallbackS<F> {
- // Forbid any conversion to/from bits.
- const BITS: usize = 0;
- const PRECISION: usize = F::PRECISION * 2;
- const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt;
- const MIN_EXP: ExpInt = F::MIN_EXP as ExpInt + F::PRECISION as ExpInt;
-}
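
One observable consequence of these semantics is that the fallback, and hence `DoubleDouble` below, advertises exactly twice the precision of the underlying format (a sketch; the value 106 assumes `ieee::Double::PRECISION` is 53):

    use rustc_apfloat::ieee::Double;
    use rustc_apfloat::ppc::DoubleDouble;
    use rustc_apfloat::Float;

    fn fallback_precision() {
        // DoubleFloat::PRECISION (further down in this file) is Fallback::<F>::PRECISION,
        // which the semantics above define as F::PRECISION * 2 -- 106 bits for Double.
        assert_eq!(DoubleDouble::PRECISION, 2 * Double::PRECISION);
    }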
-
-// Convert number to F. To avoid spurious underflows, we re-
-// normalize against the F exponent range first, and only *then*
-// truncate the mantissa. The result of that second conversion
-// may be inexact, but should never underflow.
-// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds.
-pub struct FallbackExtendedS<F>(#[allow(unused)] F);
-type FallbackExtended<F> = ieee::IeeeFloat<FallbackExtendedS<F>>;
-impl<F: Float> ieee::Semantics for FallbackExtendedS<F> {
- // Forbid any conversion to/from bits.
- const BITS: usize = 0;
- const PRECISION: usize = Fallback::<F>::PRECISION;
- const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt;
-}
-
-impl<F: Float> From<Fallback<F>> for DoubleFloat<F>
-where
- F: FloatConvert<FallbackExtended<F>>,
- FallbackExtended<F>: FloatConvert<F>,
-{
- fn from(x: Fallback<F>) -> Self {
- let mut status;
- let mut loses_info = false;
-
- let extended: FallbackExtended<F> = unpack!(status=, x.convert(&mut loses_info));
- assert_eq!((status, loses_info), (Status::OK, false));
-
- let a = unpack!(status=, extended.convert(&mut loses_info));
- assert_eq!(status - Status::INEXACT, Status::OK);
-
- // If conversion was exact or resulted in a special case, we're done;
- // just set the second double to zero. Otherwise, re-convert back to
- // the extended format and compute the difference. This now should
- // convert exactly to double.
- let b = if a.is_finite_non_zero() && loses_info {
- let u: FallbackExtended<F> = unpack!(status=, a.convert(&mut loses_info));
- assert_eq!((status, loses_info), (Status::OK, false));
- let v = unpack!(status=, extended - u);
- assert_eq!(status, Status::OK);
- let v = unpack!(status=, v.convert(&mut loses_info));
- assert_eq!((status, loses_info), (Status::OK, false));
- v
- } else {
- F::ZERO
- };
-
- DoubleFloat(a, b)
- }
-}
-
-impl<F: FloatConvert<Self>> From<DoubleFloat<F>> for Fallback<F> {
- fn from(DoubleFloat(a, b): DoubleFloat<F>) -> Self {
- let mut status;
- let mut loses_info = false;
-
- // Get the first F and convert to our format.
- let a = unpack!(status=, a.convert(&mut loses_info));
- assert_eq!((status, loses_info), (Status::OK, false));
-
- // Unless we have a special case, add in second F.
- if a.is_finite_non_zero() {
- let b = unpack!(status=, b.convert(&mut loses_info));
- assert_eq!((status, loses_info), (Status::OK, false));
-
- (a + b).value
- } else {
- a
- }
- }
-}
-
-float_common_impls!(DoubleFloat<F>);
-
-impl<F: Float> Neg for DoubleFloat<F> {
- type Output = Self;
- fn neg(self) -> Self {
- if self.1.is_finite_non_zero() {
- DoubleFloat(-self.0, -self.1)
- } else {
- DoubleFloat(-self.0, self.1)
- }
- }
-}
-
-impl<F: FloatConvert<Fallback<F>>> fmt::Display for DoubleFloat<F> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&Fallback::from(*self), f)
- }
-}
-
-impl<F: FloatConvert<Fallback<F>>> Float for DoubleFloat<F>
-where
- Self: From<Fallback<F>>,
-{
- const BITS: usize = F::BITS * 2;
- const PRECISION: usize = Fallback::<F>::PRECISION;
- const MAX_EXP: ExpInt = Fallback::<F>::MAX_EXP;
- const MIN_EXP: ExpInt = Fallback::<F>::MIN_EXP;
-
- const ZERO: Self = DoubleFloat(F::ZERO, F::ZERO);
-
- const INFINITY: Self = DoubleFloat(F::INFINITY, F::ZERO);
-
- // FIXME(eddyb) remove when qnan becomes const fn.
- const NAN: Self = DoubleFloat(F::NAN, F::ZERO);
-
- fn qnan(payload: Option<u128>) -> Self {
- DoubleFloat(F::qnan(payload), F::ZERO)
- }
-
- fn snan(payload: Option<u128>) -> Self {
- DoubleFloat(F::snan(payload), F::ZERO)
- }
-
- fn largest() -> Self {
- let status;
- let mut r = DoubleFloat(F::largest(), F::largest());
- r.1 = r.1.scalbn(-(F::PRECISION as ExpInt + 1));
- r.1 = unpack!(status=, r.1.next_down());
- assert_eq!(status, Status::OK);
- r
- }
-
- const SMALLEST: Self = DoubleFloat(F::SMALLEST, F::ZERO);
-
- fn smallest_normalized() -> Self {
- DoubleFloat(F::smallest_normalized().scalbn(F::PRECISION as ExpInt), F::ZERO)
- }
-
- // Implement addition, subtraction, multiplication and division based on:
- // "Software for Doubled-Precision Floating-Point Computations",
- // by Seppo Linnainmaa, ACM TOMS vol 7 no 3, September 1981, pages 272-283.
-
- fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
- match (self.category(), rhs.category()) {
- (Category::Infinity, Category::Infinity) => {
- if self.is_negative() != rhs.is_negative() {
- Status::INVALID_OP.and(Self::NAN.copy_sign(self))
- } else {
- Status::OK.and(self)
- }
- }
-
- (_, Category::Zero) | (Category::NaN, _) | (Category::Infinity, Category::Normal) => {
- Status::OK.and(self)
- }
-
- (Category::Zero, _) | (_, Category::NaN | Category::Infinity) => Status::OK.and(rhs),
-
- (Category::Normal, Category::Normal) => {
- let mut status = Status::OK;
- let (a, aa, c, cc) = (self.0, self.1, rhs.0, rhs.1);
- let mut z = a;
- z = unpack!(status|=, z.add_r(c, round));
- if !z.is_finite() {
- if !z.is_infinite() {
- return status.and(DoubleFloat(z, F::ZERO));
- }
- status = Status::OK;
- let a_cmp_c = a.cmp_abs_normal(c);
- z = cc;
- z = unpack!(status|=, z.add_r(aa, round));
- if a_cmp_c == Ordering::Greater {
- // z = cc + aa + c + a;
- z = unpack!(status|=, z.add_r(c, round));
- z = unpack!(status|=, z.add_r(a, round));
- } else {
- // z = cc + aa + a + c;
- z = unpack!(status|=, z.add_r(a, round));
- z = unpack!(status|=, z.add_r(c, round));
- }
- if !z.is_finite() {
- return status.and(DoubleFloat(z, F::ZERO));
- }
- self.0 = z;
- let mut zz = aa;
- zz = unpack!(status|=, zz.add_r(cc, round));
- if a_cmp_c == Ordering::Greater {
- // self.1 = a - z + c + zz;
- self.1 = a;
- self.1 = unpack!(status|=, self.1.sub_r(z, round));
- self.1 = unpack!(status|=, self.1.add_r(c, round));
- self.1 = unpack!(status|=, self.1.add_r(zz, round));
- } else {
- // self.1 = c - z + a + zz;
- self.1 = c;
- self.1 = unpack!(status|=, self.1.sub_r(z, round));
- self.1 = unpack!(status|=, self.1.add_r(a, round));
- self.1 = unpack!(status|=, self.1.add_r(zz, round));
- }
- } else {
- // q = a - z;
- let mut q = a;
- q = unpack!(status|=, q.sub_r(z, round));
-
- // zz = q + c + (a - (q + z)) + aa + cc;
- // Compute a - (q + z) as -((q + z) - a) to avoid temporary copies.
- let mut zz = q;
- zz = unpack!(status|=, zz.add_r(c, round));
- q = unpack!(status|=, q.add_r(z, round));
- q = unpack!(status|=, q.sub_r(a, round));
- q = -q;
- zz = unpack!(status|=, zz.add_r(q, round));
- zz = unpack!(status|=, zz.add_r(aa, round));
- zz = unpack!(status|=, zz.add_r(cc, round));
- if zz.is_zero() && !zz.is_negative() {
- return Status::OK.and(DoubleFloat(z, F::ZERO));
- }
- self.0 = z;
- self.0 = unpack!(status|=, self.0.add_r(zz, round));
- if !self.0.is_finite() {
- self.1 = F::ZERO;
- return status.and(self);
- }
- self.1 = z;
- self.1 = unpack!(status|=, self.1.sub_r(self.0, round));
- self.1 = unpack!(status|=, self.1.add_r(zz, round));
- }
- status.and(self)
- }
- }
- }
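
The error-free transformation that the addition above leans on is the classic two-sum: the rounded sum plus a correction term represents the exact result. A minimal sketch with plain f64, as an illustration of the technique rather than the code above:

    // two_sum returns (s, err) with s = fl(a + b) and a + b == s + err exactly
    // (barring overflow); err plays roughly the role of the low word in DoubleFloat.
    fn two_sum(a: f64, b: f64) -> (f64, f64) {
        let s = a + b;
        let bb = s - a;                         // the part of b absorbed into s
        let err = (a - (s - bb)) + (b - bb);    // exact rounding error of s
        (s, err)
    }

    fn two_sum_demo() {
        let (s, e) = two_sum(1.0, 1e-20);
        assert_eq!((s, e), (1.0, 1e-20));       // the tiny addend survives in the error term
    }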
-
- fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
- // Interesting observation: For special categories, finding the lowest
- // common ancestor of the following layered graph gives the correct
- // return category:
- //
- //    NaN
- //   /   \
- //  Zero  Inf
- //   \   /
- //   Normal
- //
- // e.g., NaN * NaN = NaN
- // Zero * Inf = NaN
- // Normal * Zero = Zero
- // Normal * Inf = Inf
- match (self.category(), rhs.category()) {
- (Category::NaN, _) => Status::OK.and(self),
-
- (_, Category::NaN) => Status::OK.and(rhs),
-
- (Category::Zero, Category::Infinity) | (Category::Infinity, Category::Zero) => {
- Status::OK.and(Self::NAN)
- }
-
- (Category::Zero | Category::Infinity, _) => Status::OK.and(self),
-
- (_, Category::Zero | Category::Infinity) => Status::OK.and(rhs),
-
- (Category::Normal, Category::Normal) => {
- let mut status = Status::OK;
- let (a, b, c, d) = (self.0, self.1, rhs.0, rhs.1);
- // t = a * c
- let mut t = a;
- t = unpack!(status|=, t.mul_r(c, round));
- if !t.is_finite_non_zero() {
- return status.and(DoubleFloat(t, F::ZERO));
- }
-
- // tau = fmsub(a, c, t), that is -fmadd(-a, c, t).
- let mut tau = a;
- tau = unpack!(status|=, tau.mul_add_r(c, -t, round));
- // v = a * d
- let mut v = a;
- v = unpack!(status|=, v.mul_r(d, round));
- // w = b * c
- let mut w = b;
- w = unpack!(status|=, w.mul_r(c, round));
- v = unpack!(status|=, v.add_r(w, round));
- // tau += v + w
- tau = unpack!(status|=, tau.add_r(v, round));
- // u = t + tau
- let mut u = t;
- u = unpack!(status|=, u.add_r(tau, round));
-
- self.0 = u;
- if !u.is_finite() {
- self.1 = F::ZERO;
- } else {
- // self.1 = (t - u) + tau
- t = unpack!(status|=, t.sub_r(u, round));
- t = unpack!(status|=, t.add_r(tau, round));
- self.1 = t;
- }
- status.and(self)
- }
- }
- }
-
- fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self> {
- Fallback::from(self)
- .mul_add_r(Fallback::from(multiplicand), Fallback::from(addend), round)
- .map(Self::from)
- }
-
- fn div_r(self, rhs: Self, round: Round) -> StatusAnd<Self> {
- Fallback::from(self).div_r(Fallback::from(rhs), round).map(Self::from)
- }
-
- fn c_fmod(self, rhs: Self) -> StatusAnd<Self> {
- Fallback::from(self).c_fmod(Fallback::from(rhs)).map(Self::from)
- }
-
- fn round_to_integral(self, round: Round) -> StatusAnd<Self> {
- Fallback::from(self).round_to_integral(round).map(Self::from)
- }
-
- fn next_up(self) -> StatusAnd<Self> {
- Fallback::from(self).next_up().map(Self::from)
- }
-
- fn from_bits(input: u128) -> Self {
- let (a, b) = (input, input >> F::BITS);
- DoubleFloat(F::from_bits(a & ((1 << F::BITS) - 1)), F::from_bits(b & ((1 << F::BITS) - 1)))
- }
-
- fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self> {
- Fallback::from_u128_r(input, round).map(Self::from)
- }
-
- fn from_str_r(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
- Fallback::from_str_r(s, round).map(|r| r.map(Self::from))
- }
-
- fn to_bits(self) -> u128 {
- self.0.to_bits() | (self.1.to_bits() << F::BITS)
- }
-
- fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128> {
- Fallback::from(self).to_u128_r(width, round, is_exact)
- }
-
- fn cmp_abs_normal(self, rhs: Self) -> Ordering {
- self.0.cmp_abs_normal(rhs.0).then_with(|| {
- let result = self.1.cmp_abs_normal(rhs.1);
- if result != Ordering::Equal {
- let against = self.0.is_negative() ^ self.1.is_negative();
- let rhs_against = rhs.0.is_negative() ^ rhs.1.is_negative();
- (!against)
- .cmp(&!rhs_against)
- .then_with(|| if against { result.reverse() } else { result })
- } else {
- result
- }
- })
- }
-
- fn bitwise_eq(self, rhs: Self) -> bool {
- self.0.bitwise_eq(rhs.0) && self.1.bitwise_eq(rhs.1)
- }
-
- fn is_negative(self) -> bool {
- self.0.is_negative()
- }
-
- fn is_denormal(self) -> bool {
- self.category() == Category::Normal
- && (self.0.is_denormal() || self.1.is_denormal() ||
- // (double)(Hi + Lo) == Hi defines a normal number.
- !(self.0 + self.1).value.bitwise_eq(self.0))
- }
-
- fn is_signaling(self) -> bool {
- self.0.is_signaling()
- }
-
- fn category(self) -> Category {
- self.0.category()
- }
-
- fn get_exact_inverse(self) -> Option<Self> {
- Fallback::from(self).get_exact_inverse().map(Self::from)
- }
-
- fn ilogb(self) -> ExpInt {
- self.0.ilogb()
- }
-
- fn scalbn_r(self, exp: ExpInt, round: Round) -> Self {
- DoubleFloat(self.0.scalbn_r(exp, round), self.1.scalbn_r(exp, round))
- }
-
- fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self {
- let a = self.0.frexp_r(exp, round);
- let mut b = self.1;
- if self.category() == Category::Normal {
- b = b.scalbn_r(-*exp, round);
- }
- DoubleFloat(a, b)
- }
-}
diff --git a/compiler/rustc_apfloat/tests/ieee.rs b/compiler/rustc_apfloat/tests/ieee.rs
deleted file mode 100644
index f8fac0c23..000000000
--- a/compiler/rustc_apfloat/tests/ieee.rs
+++ /dev/null
@@ -1,3301 +0,0 @@
-// ignore-tidy-filelength
-
-use rustc_apfloat::ieee::{Double, Half, Quad, Single, X87DoubleExtended};
-use rustc_apfloat::unpack;
-use rustc_apfloat::{Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO};
-use rustc_apfloat::{Float, FloatConvert, ParseError, Round, Status};
-
-trait SingleExt {
- fn from_f32(input: f32) -> Self;
- fn to_f32(self) -> f32;
-}
-
-impl SingleExt for Single {
- fn from_f32(input: f32) -> Self {
- Self::from_bits(input.to_bits() as u128)
- }
-
- fn to_f32(self) -> f32 {
- f32::from_bits(self.to_bits() as u32)
- }
-}
-
-trait DoubleExt {
- fn from_f64(input: f64) -> Self;
- fn to_f64(self) -> f64;
-}
-
-impl DoubleExt for Double {
- fn from_f64(input: f64) -> Self {
- Self::from_bits(input.to_bits() as u128)
- }
-
- fn to_f64(self) -> f64 {
- f64::from_bits(self.to_bits() as u64)
- }
-}
-
-#[test]
-fn is_signaling() {
- // We test qNaN, -qNaN, +sNaN, -sNaN with and without payloads.
- let payload = 4;
- assert!(!Single::qnan(None).is_signaling());
- assert!(!(-Single::qnan(None)).is_signaling());
- assert!(!Single::qnan(Some(payload)).is_signaling());
- assert!(!(-Single::qnan(Some(payload))).is_signaling());
- assert!(Single::snan(None).is_signaling());
- assert!((-Single::snan(None)).is_signaling());
- assert!(Single::snan(Some(payload)).is_signaling());
- assert!((-Single::snan(Some(payload))).is_signaling());
-}
-
-#[test]
-fn next() {
- // 1. Test Special Case Values.
- //
- // Test all special values for nextUp and nextDown prescribed by IEEE-754R
- // 2008. These are:
- // 1. +inf
- // 2. -inf
- // 3. largest
- // 4. -largest
- // 5. smallest
- // 6. -smallest
- // 7. qNaN
- // 8. sNaN
- // 9. +0
- // 10. -0
-
- let mut status;
-
- // nextUp(+inf) = +inf.
- let test = unpack!(status=, Quad::INFINITY.next_up());
- let expected = Quad::INFINITY;
- assert_eq!(status, Status::OK);
- assert!(test.is_infinite());
- assert!(!test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(+inf) = -nextUp(-inf) = -(-largest) = largest
- let test = unpack!(status=, Quad::INFINITY.next_down());
- let expected = Quad::largest();
- assert_eq!(status, Status::OK);
- assert!(!test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(-inf) = -largest
- let test = unpack!(status=, (-Quad::INFINITY).next_up());
- let expected = -Quad::largest();
- assert_eq!(status, Status::OK);
- assert!(test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-inf) = -nextUp(+inf) = -(+inf) = -inf.
- let test = unpack!(status=, (-Quad::INFINITY).next_down());
- let expected = -Quad::INFINITY;
- assert_eq!(status, Status::OK);
- assert!(test.is_infinite() && test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(largest) = +inf
- let test = unpack!(status=, Quad::largest().next_up());
- let expected = Quad::INFINITY;
- assert_eq!(status, Status::OK);
- assert!(test.is_infinite() && !test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(largest) = -nextUp(-largest)
- // = -(-largest + inc)
- // = largest - inc.
- let test = unpack!(status=, Quad::largest().next_down());
- let expected = "0x1.fffffffffffffffffffffffffffep+16383".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(!test.is_infinite() && !test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(-largest) = -largest + inc.
- let test = unpack!(status=, (-Quad::largest()).next_up());
- let expected = "-0x1.fffffffffffffffffffffffffffep+16383".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-largest) = -nextUp(largest) = -(inf) = -inf.
- let test = unpack!(status=, (-Quad::largest()).next_down());
- let expected = -Quad::INFINITY;
- assert_eq!(status, Status::OK);
- assert!(test.is_infinite() && test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(smallest) = smallest + inc.
- let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "0x0.0000000000000000000000000002p-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(smallest) = -nextUp(-smallest) = -(-0) = +0.
- let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = Quad::ZERO;
- assert_eq!(status, Status::OK);
- assert!(test.is_pos_zero());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(-smallest) = -0.
- let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = -Quad::ZERO;
- assert_eq!(status, Status::OK);
- assert!(test.is_neg_zero());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-smallest) = -nextUp(smallest) = -smallest - inc.
- let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "-0x0.0000000000000000000000000002p-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextUp(qNaN) = qNaN
- let test = unpack!(status=, Quad::qnan(None).next_up());
- let expected = Quad::qnan(None);
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(qNaN) = qNaN
- let test = unpack!(status=, Quad::qnan(None).next_down());
- let expected = Quad::qnan(None);
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextUp(sNaN) = qNaN
- let test = unpack!(status=, Quad::snan(None).next_up());
- let expected = Quad::qnan(None);
- assert_eq!(status, Status::INVALID_OP);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(sNaN) = qNaN
- let test = unpack!(status=, Quad::snan(None).next_down());
- let expected = Quad::qnan(None);
- assert_eq!(status, Status::INVALID_OP);
- assert!(test.bitwise_eq(expected));
-
- // nextUp(+0) = +smallest
- let test = unpack!(status=, Quad::ZERO.next_up());
- let expected = Quad::SMALLEST;
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(+0) = -nextUp(-0) = -smallest
- let test = unpack!(status=, Quad::ZERO.next_down());
- let expected = -Quad::SMALLEST;
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextUp(-0) = +smallest
- let test = unpack!(status=, (-Quad::ZERO).next_up());
- let expected = Quad::SMALLEST;
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-0) = -nextUp(0) = -smallest
- let test = unpack!(status=, (-Quad::ZERO).next_down());
- let expected = -Quad::SMALLEST;
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // 2. Binade Boundary Tests.
-
- // 2a. Test denormal <-> normal binade boundaries.
- // * nextUp(+Largest Denormal) -> +Smallest Normal.
- // * nextDown(-Largest Denormal) -> -Smallest Normal.
- // * nextUp(-Smallest Normal) -> -Largest Denormal.
- // * nextDown(+Smallest Normal) -> +Largest Denormal.
-
- // nextUp(+Largest Denormal) -> +Smallest Normal.
- let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "0x1.0000000000000000000000000000p-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(!test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-Largest Denormal) -> -Smallest Normal.
- let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "-0x1.0000000000000000000000000000p-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(!test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(-Smallest Normal) -> -Largest Denormal.
- let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "-0x0.ffffffffffffffffffffffffffffp-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(+Smallest Normal) -> +Largest Denormal.
- let test = unpack!(status=, "+0x1.0000000000000000000000000000p-16382"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "+0x0.ffffffffffffffffffffffffffffp-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- // 2b. Test normal <-> normal binade boundaries.
- // * nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1.
- // * nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1.
- // * nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary.
- // * nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary.
-
- // nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1.
- let test = unpack!(status=, "-0x1p+1".parse::<Quad>().unwrap().next_up());
- let expected = "-0x1.ffffffffffffffffffffffffffffp+0".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1.
- let test = unpack!(status=, "0x1p+1".parse::<Quad>().unwrap().next_down());
- let expected = "0x1.ffffffffffffffffffffffffffffp+0".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary.
- let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp+0"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "0x1p+1".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary.
- let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp+0"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "-0x1p+1".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // 2c. Test using next at binade boundaries with a direction away from the
- // binade boundary. Away from denormal <-> normal boundaries.
- //
- // This is to make sure that even though we are at a binade boundary, since
- // we are rounding away, we do not trigger the binade boundary code. Thus we
- // test:
- // * nextUp(-Largest Denormal) -> -Largest Denormal + inc.
- // * nextDown(+Largest Denormal) -> +Largest Denormal - inc.
- // * nextUp(+Smallest Normal) -> +Smallest Normal + inc.
- // * nextDown(-Smallest Normal) -> -Smallest Normal - inc.
-
- // nextUp(-Largest Denormal) -> -Largest Denormal + inc.
- let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "-0x0.fffffffffffffffffffffffffffep-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.is_denormal());
- assert!(test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(+Largest Denormal) -> +Largest Denormal - inc.
- let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "0x0.fffffffffffffffffffffffffffep-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.is_denormal());
- assert!(!test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(+Smallest Normal) -> +Smallest Normal + inc.
- let test = unpack!(status=, "0x1.0000000000000000000000000000p-16382"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "0x1.0000000000000000000000000001p-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(!test.is_denormal());
- assert!(!test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-Smallest Normal) -> -Smallest Normal - inc.
- let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "-0x1.0000000000000000000000000001p-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(!test.is_denormal());
- assert!(test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // 2d. Test values which cause our exponent to go to min exponent. This
- // is to ensure that guards in the code to check for min exponent
- // trigger properly.
- // * nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382
- // * nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) ->
- // -0x1p-16381
- // * nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16381
- // * nextDown(0x1p-16381) -> 0x1.ffffffffffffffffffffffffffffp-16382
-
- // nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382
- let test = unpack!(status=, "-0x1p-16381".parse::<Quad>().unwrap().next_up());
- let expected = "-0x1.ffffffffffffffffffffffffffffp-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) ->
- // -0x1p-16381
- let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "-0x1p-16381".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16381
- let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "0x1p-16381".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // nextDown(0x1p-16381) -> 0x1.ffffffffffffffffffffffffffffp-16382
- let test = unpack!(status=, "0x1p-16381".parse::<Quad>().unwrap().next_down());
- let expected = "0x1.ffffffffffffffffffffffffffffp-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.bitwise_eq(expected));
-
- // 3. Now we test both denormal and normal computations which will not cause
- // us to cross binade boundaries. Specifically we test:
- // * nextUp(+Denormal) -> +Denormal.
- // * nextDown(+Denormal) -> +Denormal.
- // * nextUp(-Denormal) -> -Denormal.
- // * nextDown(-Denormal) -> -Denormal.
- // * nextUp(+Normal) -> +Normal.
- // * nextDown(+Normal) -> +Normal.
- // * nextUp(-Normal) -> -Normal.
- // * nextDown(-Normal) -> -Normal.
-
- // nextUp(+Denormal) -> +Denormal.
- let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "0x0.ffffffffffffffffffffffff000dp-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.is_denormal());
- assert!(!test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(+Denormal) -> +Denormal.
- let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "0x0.ffffffffffffffffffffffff000bp-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.is_denormal());
- assert!(!test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(-Denormal) -> -Denormal.
- let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "-0x0.ffffffffffffffffffffffff000bp-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.is_denormal());
- assert!(test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-Denormal) -> -Denormal
- let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "-0x0.ffffffffffffffffffffffff000dp-16382".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(test.is_denormal());
- assert!(test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(+Normal) -> +Normal.
- let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "0x1.ffffffffffffffffffffffff000dp-16000".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(!test.is_denormal());
- assert!(!test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(+Normal) -> +Normal.
- let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "0x1.ffffffffffffffffffffffff000bp-16000".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(!test.is_denormal());
- assert!(!test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextUp(-Normal) -> -Normal.
- let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000"
- .parse::<Quad>()
- .unwrap()
- .next_up());
- let expected = "-0x1.ffffffffffffffffffffffff000bp-16000".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(!test.is_denormal());
- assert!(test.is_negative());
- assert!(test.bitwise_eq(expected));
-
- // nextDown(-Normal) -> -Normal.
- let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000"
- .parse::<Quad>()
- .unwrap()
- .next_down());
- let expected = "-0x1.ffffffffffffffffffffffff000dp-16000".parse::<Quad>().unwrap();
- assert_eq!(status, Status::OK);
- assert!(!test.is_denormal());
- assert!(test.is_negative());
- assert!(test.bitwise_eq(expected));
-}
-
-#[test]
-fn fma() {
- {
- let mut f1 = Single::from_f32(14.5);
- let f2 = Single::from_f32(-14.5);
- let f3 = Single::from_f32(225.0);
- f1 = f1.mul_add(f2, f3).value;
- assert_eq!(14.75, f1.to_f32());
- }
-
- {
- let val2 = Single::from_f32(2.0);
- let mut f1 = Single::from_f32(1.17549435e-38);
- let mut f2 = Single::from_f32(1.17549435e-38);
- f1 /= val2;
- f2 /= val2;
- let f3 = Single::from_f32(12.0);
- f1 = f1.mul_add(f2, f3).value;
- assert_eq!(12.0, f1.to_f32());
- }
-
- // Test for correct zero sign when answer is exactly zero.
- // fma(1.0, -1.0, 1.0) -> +ve 0.
- {
- let mut f1 = Double::from_f64(1.0);
- let f2 = Double::from_f64(-1.0);
- let f3 = Double::from_f64(1.0);
- f1 = f1.mul_add(f2, f3).value;
- assert!(!f1.is_negative() && f1.is_zero());
- }
-
- // Test for correct zero sign when answer is exactly zero and rounding towards
- // negative.
- // fma(1.0, -1.0, 1.0) -> -ve 0.
- {
- let mut f1 = Double::from_f64(1.0);
- let f2 = Double::from_f64(-1.0);
- let f3 = Double::from_f64(1.0);
- f1 = f1.mul_add_r(f2, f3, Round::TowardNegative).value;
- assert!(f1.is_negative() && f1.is_zero());
- }
-
- // Test for correct (in this case -ve) sign when adding like signed zeros.
- // Test fma(0.0, -0.0, -0.0) -> -ve 0.
- {
- let mut f1 = Double::from_f64(0.0);
- let f2 = Double::from_f64(-0.0);
- let f3 = Double::from_f64(-0.0);
- f1 = f1.mul_add(f2, f3).value;
- assert!(f1.is_negative() && f1.is_zero());
- }
-
- // Test -ve sign preservation when small negative results underflow.
- {
- let mut f1 = "-0x1p-1074".parse::<Double>().unwrap();
- let f2 = "+0x1p-1074".parse::<Double>().unwrap();
- let f3 = Double::from_f64(0.0);
- f1 = f1.mul_add(f2, f3).value;
- assert!(f1.is_negative() && f1.is_zero());
- }
-
- // Test x87 extended precision case from https://llvm.org/PR20728.
- {
- let mut m1 = X87DoubleExtended::from_u128(1).value;
- let m2 = X87DoubleExtended::from_u128(1).value;
- let a = X87DoubleExtended::from_u128(3).value;
-
- let mut loses_info = false;
- m1 = m1.mul_add(m2, a).value;
- let r: Single = m1.convert(&mut loses_info).value;
- assert!(!loses_info);
- assert_eq!(4.0, r.to_f32());
- }
-}
-
-#[test]
-fn issue_69532() {
- let f = Double::from_bits(0x7FF0_0000_0000_0001u64 as u128);
- let mut loses_info = false;
- let sta = f.convert(&mut loses_info);
- let r: Single = sta.value;
- assert!(loses_info);
- assert!(r.is_nan());
- assert_eq!(sta.status, Status::INVALID_OP);
-}
-
-#[test]
-fn min_num() {
- let f1 = Double::from_f64(1.0);
- let f2 = Double::from_f64(2.0);
- let nan = Double::NAN;
-
- assert_eq!(1.0, f1.min(f2).to_f64());
- assert_eq!(1.0, f2.min(f1).to_f64());
- assert_eq!(1.0, f1.min(nan).to_f64());
- assert_eq!(1.0, nan.min(f1).to_f64());
-}
-
-#[test]
-fn max_num() {
- let f1 = Double::from_f64(1.0);
- let f2 = Double::from_f64(2.0);
- let nan = Double::NAN;
-
- assert_eq!(2.0, f1.max(f2).to_f64());
- assert_eq!(2.0, f2.max(f1).to_f64());
- assert_eq!(1.0, f1.max(nan).to_f64());
- assert_eq!(1.0, nan.max(f1).to_f64());
-}
-
-#[test]
-fn denormal() {
- // Test single precision
- {
- assert!(!Single::from_f32(0.0).is_denormal());
-
- let mut t = "1.17549435082228750797e-38".parse::<Single>().unwrap();
- assert!(!t.is_denormal());
-
- let val2 = Single::from_f32(2.0e0);
- t /= val2;
- assert!(t.is_denormal());
- }
-
- // Test double precision
- {
- assert!(!Double::from_f64(0.0).is_denormal());
-
- let mut t = "2.22507385850720138309e-308".parse::<Double>().unwrap();
- assert!(!t.is_denormal());
-
- let val2 = Double::from_f64(2.0e0);
- t /= val2;
- assert!(t.is_denormal());
- }
-
- // Test Intel double-ext
- {
- assert!(!X87DoubleExtended::from_u128(0).value.is_denormal());
-
- let mut t = "3.36210314311209350626e-4932".parse::<X87DoubleExtended>().unwrap();
- assert!(!t.is_denormal());
-
- t /= X87DoubleExtended::from_u128(2).value;
- assert!(t.is_denormal());
- }
-
- // Test quadruple precision
- {
- assert!(!Quad::from_u128(0).value.is_denormal());
-
- let mut t = "3.36210314311209350626267781732175260e-4932".parse::<Quad>().unwrap();
- assert!(!t.is_denormal());
-
- t /= Quad::from_u128(2).value;
- assert!(t.is_denormal());
- }
-}
-
-#[test]
-fn decimal_strings_without_null_terminators() {
- // Make sure that we can parse strings without null terminators.
- // rdar://14323230.
- let val = "0.00"[..3].parse::<Double>().unwrap();
- assert_eq!(val.to_f64(), 0.0);
- let val = "0.01"[..3].parse::<Double>().unwrap();
- assert_eq!(val.to_f64(), 0.0);
- let val = "0.09"[..3].parse::<Double>().unwrap();
- assert_eq!(val.to_f64(), 0.0);
- let val = "0.095"[..4].parse::<Double>().unwrap();
- assert_eq!(val.to_f64(), 0.09);
- let val = "0.00e+3"[..7].parse::<Double>().unwrap();
- assert_eq!(val.to_f64(), 0.00);
- let val = "0e+3"[..4].parse::<Double>().unwrap();
- assert_eq!(val.to_f64(), 0.00);
-}
-
-#[test]
-fn from_zero_decimal_string() {
- assert_eq!(0.0, "0".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0.".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0.".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0.".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, ".0".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+.0".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-.0".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0.0".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0.0".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0.0".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "00000.".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+00000.".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-00000.".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, ".00000".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+.00000".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-.00000".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0000.00000".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0000.00000".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0000.00000".parse::<Double>().unwrap().to_f64());
-}
-
-#[test]
-fn from_zero_decimal_single_exponent_string() {
- assert_eq!(0.0, "0e1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0e1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0e1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0e+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0e+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0e+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0e-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0e-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0e-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0.e1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0.e1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0.e1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0.e+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0.e+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0.e+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0.e-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0.e-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0.e-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, ".0e1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+.0e1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-.0e1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, ".0e+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+.0e+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-.0e+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, ".0e-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+.0e-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-.0e-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0.0e1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0.0e1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0.0e1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0.0e+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0.0e+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0.0e+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0.0e-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0.0e-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0.0e-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "000.0000e1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+000.0000e+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-000.0000e+1".parse::<Double>().unwrap().to_f64());
-}
-
-#[test]
-fn from_zero_decimal_large_exponent_string() {
- assert_eq!(0.0, "0e1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0e1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0e1234".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0e+1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0e+1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0e+1234".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0e-1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0e-1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0e-1234".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "000.0000e1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "000.0000e-1234".parse::<Double>().unwrap().to_f64());
-}
-
-#[test]
-fn from_zero_hexadecimal_string() {
- assert_eq!(0.0, "0x0p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x0p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0p1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x0p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x0p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0p+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x0p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x0p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0p-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x0.p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x0.p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0.p1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x0.p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x0.p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0.p+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x0.p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x0.p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0.p-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x.0p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x.0p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x.0p1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x.0p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x.0p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x.0p+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x.0p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x.0p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x.0p-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x0.0p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x0.0p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0.0p1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x0.0p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x0.0p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0.0p+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x0.0p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "+0x0.0p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0.0p-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.0, "0x00000.p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "0x0000.00000p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "0x.00000p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "0x0.p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "0x0p1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.0, "-0x0p1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "0x00000.p1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "0x0000.00000p1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "0x.00000p1234".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.0, "0x0.p1234".parse::<Double>().unwrap().to_f64());
-}
-
-#[test]
-fn from_decimal_string() {
- assert_eq!(1.0, "1".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.0, "2.".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.5, ".5".parse::<Double>().unwrap().to_f64());
- assert_eq!(1.0, "1.0".parse::<Double>().unwrap().to_f64());
- assert_eq!(-2.0, "-2".parse::<Double>().unwrap().to_f64());
- assert_eq!(-4.0, "-4.".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.5, "-.5".parse::<Double>().unwrap().to_f64());
- assert_eq!(-1.5, "-1.5".parse::<Double>().unwrap().to_f64());
- assert_eq!(1.25e12, "1.25e12".parse::<Double>().unwrap().to_f64());
- assert_eq!(1.25e+12, "1.25e+12".parse::<Double>().unwrap().to_f64());
- assert_eq!(1.25e-12, "1.25e-12".parse::<Double>().unwrap().to_f64());
- assert_eq!(1024.0, "1024.".parse::<Double>().unwrap().to_f64());
- assert_eq!(1024.05, "1024.05000".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.05, ".05000".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.0, "2.".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.0e2, "2.e2".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.0e+2, "2.e+2".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.0e-2, "2.e-2".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.05e2, "002.05000e2".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.05e+2, "002.05000e+2".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.05e-2, "002.05000e-2".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.05e12, "002.05000e12".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.05e+12, "002.05000e+12".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.05e-12, "002.05000e-12".parse::<Double>().unwrap().to_f64());
-
- // These are "carefully selected" to overflow the fast log-base
- // calculations in the implementation.
- assert!("99e99999".parse::<Double>().unwrap().is_infinite());
- assert!("-99e99999".parse::<Double>().unwrap().is_infinite());
- assert!("1e-99999".parse::<Double>().unwrap().is_pos_zero());
- assert!("-1e-99999".parse::<Double>().unwrap().is_neg_zero());
-
- assert_eq!(2.71828, "2.71828".parse::<Double>().unwrap().to_f64());
-}
-
-#[test]
-fn from_hexadecimal_string() {
- assert_eq!(1.0, "0x1p0".parse::<Double>().unwrap().to_f64());
- assert_eq!(1.0, "+0x1p0".parse::<Double>().unwrap().to_f64());
- assert_eq!(-1.0, "-0x1p0".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(1.0, "0x1p+0".parse::<Double>().unwrap().to_f64());
- assert_eq!(1.0, "+0x1p+0".parse::<Double>().unwrap().to_f64());
- assert_eq!(-1.0, "-0x1p+0".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(1.0, "0x1p-0".parse::<Double>().unwrap().to_f64());
- assert_eq!(1.0, "+0x1p-0".parse::<Double>().unwrap().to_f64());
- assert_eq!(-1.0, "-0x1p-0".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(2.0, "0x1p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.0, "+0x1p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-2.0, "-0x1p1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(2.0, "0x1p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(2.0, "+0x1p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-2.0, "-0x1p+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.5, "0x1p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.5, "+0x1p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.5, "-0x1p-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(3.0, "0x1.8p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(3.0, "+0x1.8p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-3.0, "-0x1.8p1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(3.0, "0x1.8p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(3.0, "+0x1.8p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-3.0, "-0x1.8p+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.75, "0x1.8p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.75, "+0x1.8p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.75, "-0x1.8p-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(8192.0, "0x1000.000p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(8192.0, "+0x1000.000p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-8192.0, "-0x1000.000p1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(8192.0, "0x1000.000p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(8192.0, "+0x1000.000p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-8192.0, "-0x1000.000p+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(2048.0, "0x1000.000p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(2048.0, "+0x1000.000p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-2048.0, "-0x1000.000p-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(8192.0, "0x1000p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(8192.0, "+0x1000p1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-8192.0, "-0x1000p1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(8192.0, "0x1000p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(8192.0, "+0x1000p+1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-8192.0, "-0x1000p+1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(2048.0, "0x1000p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(2048.0, "+0x1000p-1".parse::<Double>().unwrap().to_f64());
- assert_eq!(-2048.0, "-0x1000p-1".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(16384.0, "0x10p10".parse::<Double>().unwrap().to_f64());
- assert_eq!(16384.0, "+0x10p10".parse::<Double>().unwrap().to_f64());
- assert_eq!(-16384.0, "-0x10p10".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(16384.0, "0x10p+10".parse::<Double>().unwrap().to_f64());
- assert_eq!(16384.0, "+0x10p+10".parse::<Double>().unwrap().to_f64());
- assert_eq!(-16384.0, "-0x10p+10".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(0.015625, "0x10p-10".parse::<Double>().unwrap().to_f64());
- assert_eq!(0.015625, "+0x10p-10".parse::<Double>().unwrap().to_f64());
- assert_eq!(-0.015625, "-0x10p-10".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(1.0625, "0x1.1p0".parse::<Double>().unwrap().to_f64());
- assert_eq!(1.0, "0x1p0".parse::<Double>().unwrap().to_f64());
-
- assert_eq!(
- "0x1p-150".parse::<Double>().unwrap().to_f64(),
- "+0x800000000000000001.p-221".parse::<Double>().unwrap().to_f64()
- );
- assert_eq!(
- 2251799813685248.5,
- "0x80000000000004000000.010p-28".parse::<Double>().unwrap().to_f64()
- );
-}
-
-#[test]
-fn to_string() {
- let to_string = |d: f64, precision: usize, width: usize| {
- let x = Double::from_f64(d);
- if precision == 0 {
- format!("{:1$}", x, width)
- } else {
- format!("{:2$.1$}", x, precision, width)
- }
- };
- assert_eq!("10", to_string(10.0, 6, 3));
- assert_eq!("1.0E+1", to_string(10.0, 6, 0));
- assert_eq!("10100", to_string(1.01E+4, 5, 2));
- assert_eq!("1.01E+4", to_string(1.01E+4, 4, 2));
- assert_eq!("1.01E+4", to_string(1.01E+4, 5, 1));
- assert_eq!("0.0101", to_string(1.01E-2, 5, 2));
- assert_eq!("0.0101", to_string(1.01E-2, 4, 2));
- assert_eq!("1.01E-2", to_string(1.01E-2, 5, 1));
- assert_eq!("0.78539816339744828", to_string(0.78539816339744830961, 0, 3));
- assert_eq!("4.9406564584124654E-324", to_string(4.9406564584124654e-324, 0, 3));
- assert_eq!("873.18340000000001", to_string(873.1834, 0, 1));
- assert_eq!("8.7318340000000001E+2", to_string(873.1834, 0, 0));
- assert_eq!("1.7976931348623157E+308", to_string(1.7976931348623157E+308, 0, 0));
-
- let to_string = |d: f64, precision: usize, width: usize| {
- let x = Double::from_f64(d);
- if precision == 0 {
- format!("{:#1$}", x, width)
- } else {
- format!("{:#2$.1$}", x, precision, width)
- }
- };
- assert_eq!("10", to_string(10.0, 6, 3));
- assert_eq!("1.000000e+01", to_string(10.0, 6, 0));
- assert_eq!("10100", to_string(1.01E+4, 5, 2));
- assert_eq!("1.0100e+04", to_string(1.01E+4, 4, 2));
- assert_eq!("1.01000e+04", to_string(1.01E+4, 5, 1));
- assert_eq!("0.0101", to_string(1.01E-2, 5, 2));
- assert_eq!("0.0101", to_string(1.01E-2, 4, 2));
- assert_eq!("1.01000e-02", to_string(1.01E-2, 5, 1));
- assert_eq!("0.78539816339744828", to_string(0.78539816339744830961, 0, 3));
- assert_eq!("4.94065645841246540e-324", to_string(4.9406564584124654e-324, 0, 3));
- assert_eq!("873.18340000000001", to_string(873.1834, 0, 1));
- assert_eq!("8.73183400000000010e+02", to_string(873.1834, 0, 0));
- assert_eq!("1.79769313486231570e+308", to_string(1.7976931348623157E+308, 0, 0));
-}
-
-#[test]
-fn to_integer() {
- let mut is_exact = false;
-
- assert_eq!(
- Status::OK.and(10),
- "10".parse::<Double>().unwrap().to_u128_r(5, Round::TowardZero, &mut is_exact,)
- );
- assert!(is_exact);
-
- assert_eq!(
- Status::INVALID_OP.and(0),
- "-10".parse::<Double>().unwrap().to_u128_r(5, Round::TowardZero, &mut is_exact,)
- );
- assert!(!is_exact);
-
- assert_eq!(
- Status::INVALID_OP.and(31),
- "32".parse::<Double>().unwrap().to_u128_r(5, Round::TowardZero, &mut is_exact,)
- );
- assert!(!is_exact);
-
- assert_eq!(
- Status::INEXACT.and(7),
- "7.9".parse::<Double>().unwrap().to_u128_r(5, Round::TowardZero, &mut is_exact,)
- );
- assert!(!is_exact);
-
- assert_eq!(
- Status::OK.and(-10),
- "-10".parse::<Double>().unwrap().to_i128_r(5, Round::TowardZero, &mut is_exact,)
- );
- assert!(is_exact);
-
- assert_eq!(
- Status::INVALID_OP.and(-16),
- "-17".parse::<Double>().unwrap().to_i128_r(5, Round::TowardZero, &mut is_exact,)
- );
- assert!(!is_exact);
-
- assert_eq!(
- Status::INVALID_OP.and(15),
- "16".parse::<Double>().unwrap().to_i128_r(5, Round::TowardZero, &mut is_exact,)
- );
- assert!(!is_exact);
-}
-
-#[test]
-fn nan() {
- fn nanbits<T: Float>(signaling: bool, negative: bool, fill: u128) -> u128 {
- let x = if signaling { T::snan(Some(fill)) } else { T::qnan(Some(fill)) };
- if negative { (-x).to_bits() } else { x.to_bits() }
- }
-
- assert_eq!(0x7fc00000, nanbits::<Single>(false, false, 0));
- assert_eq!(0xffc00000, nanbits::<Single>(false, true, 0));
- assert_eq!(0x7fc0ae72, nanbits::<Single>(false, false, 0xae72));
- assert_eq!(0x7fffae72, nanbits::<Single>(false, false, 0xffffae72));
- assert_eq!(0x7fa00000, nanbits::<Single>(true, false, 0));
- assert_eq!(0xffa00000, nanbits::<Single>(true, true, 0));
- assert_eq!(0x7f80ae72, nanbits::<Single>(true, false, 0xae72));
- assert_eq!(0x7fbfae72, nanbits::<Single>(true, false, 0xffffae72));
-
- assert_eq!(0x7ff8000000000000, nanbits::<Double>(false, false, 0));
- assert_eq!(0xfff8000000000000, nanbits::<Double>(false, true, 0));
- assert_eq!(0x7ff800000000ae72, nanbits::<Double>(false, false, 0xae72));
- assert_eq!(0x7fffffffffffae72, nanbits::<Double>(false, false, 0xffffffffffffae72));
- assert_eq!(0x7ff4000000000000, nanbits::<Double>(true, false, 0));
- assert_eq!(0xfff4000000000000, nanbits::<Double>(true, true, 0));
- assert_eq!(0x7ff000000000ae72, nanbits::<Double>(true, false, 0xae72));
- assert_eq!(0x7ff7ffffffffae72, nanbits::<Double>(true, false, 0xffffffffffffae72));
-}
-
-#[test]
-fn string_decimal_death() {
- assert_eq!("".parse::<Double>(), Err(ParseError("Invalid string length")));
- assert_eq!("+".parse::<Double>(), Err(ParseError("String has no digits")));
- assert_eq!("-".parse::<Double>(), Err(ParseError("String has no digits")));
-
- assert_eq!("\0".parse::<Double>(), Err(ParseError("Invalid character in significand")));
- assert_eq!("1\0".parse::<Double>(), Err(ParseError("Invalid character in significand")));
- assert_eq!("1\02".parse::<Double>(), Err(ParseError("Invalid character in significand")));
- assert_eq!("1\02e1".parse::<Double>(), Err(ParseError("Invalid character in significand")));
- assert_eq!("1e\0".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
- assert_eq!("1e1\0".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
- assert_eq!("1e1\02".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
-
- assert_eq!("1.0f".parse::<Double>(), Err(ParseError("Invalid character in significand")));
-
- assert_eq!("..".parse::<Double>(), Err(ParseError("String contains multiple dots")));
- assert_eq!("..0".parse::<Double>(), Err(ParseError("String contains multiple dots")));
- assert_eq!("1.0.0".parse::<Double>(), Err(ParseError("String contains multiple dots")));
-}
-
-#[test]
-fn string_decimal_significand_death() {
- assert_eq!(".".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+.".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-.".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!("e".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+e".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-e".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!("e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!(".e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+.e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-.e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!(".e".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+.e".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-.e".parse::<Double>(), Err(ParseError("Significand has no digits")));
-}
-
-#[test]
-fn string_decimal_exponent_death() {
- assert_eq!("1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("1.e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+1.e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-1.e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!(".1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("1.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+1.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-1.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("1e+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("1e-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!(".1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!(".1e+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!(".1e-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("1.0e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("1.0e+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("1.0e-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-}
-
-#[test]
-fn string_hexadecimal_death() {
- assert_eq!("0x".parse::<Double>(), Err(ParseError("Invalid string")));
- assert_eq!("+0x".parse::<Double>(), Err(ParseError("Invalid string")));
- assert_eq!("-0x".parse::<Double>(), Err(ParseError("Invalid string")));
-
- assert_eq!("0x0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
- assert_eq!("+0x0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
- assert_eq!("-0x0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
-
- assert_eq!("0x0.".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
- assert_eq!("+0x0.".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
- assert_eq!("-0x0.".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
-
- assert_eq!("0x.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
- assert_eq!("+0x.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
- assert_eq!("-0x.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
-
- assert_eq!("0x0.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
- assert_eq!("+0x0.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
- assert_eq!("-0x0.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
-
- assert_eq!("0x\0".parse::<Double>(), Err(ParseError("Invalid character in significand")));
- assert_eq!("0x1\0".parse::<Double>(), Err(ParseError("Invalid character in significand")));
- assert_eq!("0x1\02".parse::<Double>(), Err(ParseError("Invalid character in significand")));
- assert_eq!("0x1\02p1".parse::<Double>(), Err(ParseError("Invalid character in significand")));
- assert_eq!("0x1p\0".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
- assert_eq!("0x1p1\0".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
- assert_eq!("0x1p1\02".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
-
- assert_eq!("0x1p0f".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
-
- assert_eq!("0x..p1".parse::<Double>(), Err(ParseError("String contains multiple dots")));
- assert_eq!("0x..0p1".parse::<Double>(), Err(ParseError("String contains multiple dots")));
- assert_eq!("0x1.0.0p1".parse::<Double>(), Err(ParseError("String contains multiple dots")));
-}
-
-#[test]
-fn string_hexadecimal_significand_death() {
- assert_eq!("0x.".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+0x.".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-0x.".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!("0xp".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+0xp".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-0xp".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!("0xp+".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+0xp+".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-0xp+".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!("0xp-".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+0xp-".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-0xp-".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!("0x.p".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+0x.p".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-0x.p".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!("0x.p+".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+0x.p+".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-0x.p+".parse::<Double>(), Err(ParseError("Significand has no digits")));
-
- assert_eq!("0x.p-".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("+0x.p-".parse::<Double>(), Err(ParseError("Significand has no digits")));
- assert_eq!("-0x.p-".parse::<Double>(), Err(ParseError("Significand has no digits")));
-}
-
-#[test]
-fn string_hexadecimal_exponent_death() {
- assert_eq!("0x1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x1.p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x1.p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x1.p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x1.p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x1.p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x1.p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x1.p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x1.p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x1.p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x1.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x1.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x1.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x1.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x1.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x1.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-
- assert_eq!("0x1.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("+0x1.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
- assert_eq!("-0x1.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
-}
-
-#[test]
-fn exact_inverse() {
- // Trivial operation.
- assert!(Double::from_f64(2.0).get_exact_inverse().unwrap().bitwise_eq(Double::from_f64(0.5)));
- assert!(Single::from_f32(2.0).get_exact_inverse().unwrap().bitwise_eq(Single::from_f32(0.5)));
- assert!(
- "2.0"
- .parse::<Quad>()
- .unwrap()
- .get_exact_inverse()
- .unwrap()
- .bitwise_eq("0.5".parse::<Quad>().unwrap())
- );
- assert!(
- "2.0"
- .parse::<X87DoubleExtended>()
- .unwrap()
- .get_exact_inverse()
- .unwrap()
- .bitwise_eq("0.5".parse::<X87DoubleExtended>().unwrap())
- );
-
- // FLT_MIN
- assert!(
- Single::from_f32(1.17549435e-38)
- .get_exact_inverse()
- .unwrap()
- .bitwise_eq(Single::from_f32(8.5070592e+37))
- );
-
- // Large float, inverse is a denormal.
- assert!(Single::from_f32(1.7014118e38).get_exact_inverse().is_none());
- // Zero
- assert!(Double::from_f64(0.0).get_exact_inverse().is_none());
- // Denormalized float
- assert!(Single::from_f32(1.40129846e-45).get_exact_inverse().is_none());
-}
-
-#[test]
-fn round_to_integral() {
- let t = Double::from_f64(-0.5);
- assert_eq!(-0.0, t.round_to_integral(Round::TowardZero).value.to_f64());
- assert_eq!(-1.0, t.round_to_integral(Round::TowardNegative).value.to_f64());
- assert_eq!(-0.0, t.round_to_integral(Round::TowardPositive).value.to_f64());
- assert_eq!(-0.0, t.round_to_integral(Round::NearestTiesToEven).value.to_f64());
-
- let s = Double::from_f64(3.14);
- assert_eq!(3.0, s.round_to_integral(Round::TowardZero).value.to_f64());
- assert_eq!(3.0, s.round_to_integral(Round::TowardNegative).value.to_f64());
- assert_eq!(4.0, s.round_to_integral(Round::TowardPositive).value.to_f64());
- assert_eq!(3.0, s.round_to_integral(Round::NearestTiesToEven).value.to_f64());
-
- let r = Double::largest();
- assert_eq!(r.to_f64(), r.round_to_integral(Round::TowardZero).value.to_f64());
- assert_eq!(r.to_f64(), r.round_to_integral(Round::TowardNegative).value.to_f64());
- assert_eq!(r.to_f64(), r.round_to_integral(Round::TowardPositive).value.to_f64());
- assert_eq!(r.to_f64(), r.round_to_integral(Round::NearestTiesToEven).value.to_f64());
-
- let p = Double::ZERO.round_to_integral(Round::TowardZero).value;
- assert_eq!(0.0, p.to_f64());
- let p = (-Double::ZERO).round_to_integral(Round::TowardZero).value;
- assert_eq!(-0.0, p.to_f64());
- let p = Double::NAN.round_to_integral(Round::TowardZero).value;
- assert!(p.to_f64().is_nan());
- let p = Double::INFINITY.round_to_integral(Round::TowardZero).value;
- assert!(p.to_f64().is_infinite() && p.to_f64() > 0.0);
- let p = (-Double::INFINITY).round_to_integral(Round::TowardZero).value;
- assert!(p.to_f64().is_infinite() && p.to_f64() < 0.0);
-}
-
-#[test]
-fn is_integer() {
- let t = Double::from_f64(-0.0);
- assert!(t.is_integer());
- let t = Double::from_f64(3.14159);
- assert!(!t.is_integer());
- let t = Double::NAN;
- assert!(!t.is_integer());
- let t = Double::INFINITY;
- assert!(!t.is_integer());
- let t = -Double::INFINITY;
- assert!(!t.is_integer());
- let t = Double::largest();
- assert!(t.is_integer());
-}
-
-#[test]
-fn largest() {
- assert_eq!(3.402823466e+38, Single::largest().to_f32());
- assert_eq!(1.7976931348623158e+308, Double::largest().to_f64());
-}
-
-#[test]
-fn smallest() {
- let test = Single::SMALLEST;
- let expected = "0x0.000002p-126".parse::<Single>().unwrap();
- assert!(!test.is_negative());
- assert!(test.is_finite_non_zero());
- assert!(test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- let test = -Single::SMALLEST;
- let expected = "-0x0.000002p-126".parse::<Single>().unwrap();
- assert!(test.is_negative());
- assert!(test.is_finite_non_zero());
- assert!(test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- let test = Quad::SMALLEST;
- let expected = "0x0.0000000000000000000000000001p-16382".parse::<Quad>().unwrap();
- assert!(!test.is_negative());
- assert!(test.is_finite_non_zero());
- assert!(test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- let test = -Quad::SMALLEST;
- let expected = "-0x0.0000000000000000000000000001p-16382".parse::<Quad>().unwrap();
- assert!(test.is_negative());
- assert!(test.is_finite_non_zero());
- assert!(test.is_denormal());
- assert!(test.bitwise_eq(expected));
-}
-
-#[test]
-fn smallest_normalized() {
- let test = Single::smallest_normalized();
- let expected = "0x1p-126".parse::<Single>().unwrap();
- assert!(!test.is_negative());
- assert!(test.is_finite_non_zero());
- assert!(!test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- let test = -Single::smallest_normalized();
- let expected = "-0x1p-126".parse::<Single>().unwrap();
- assert!(test.is_negative());
- assert!(test.is_finite_non_zero());
- assert!(!test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- let test = Quad::smallest_normalized();
- let expected = "0x1p-16382".parse::<Quad>().unwrap();
- assert!(!test.is_negative());
- assert!(test.is_finite_non_zero());
- assert!(!test.is_denormal());
- assert!(test.bitwise_eq(expected));
-
- let test = -Quad::smallest_normalized();
- let expected = "-0x1p-16382".parse::<Quad>().unwrap();
- assert!(test.is_negative());
- assert!(test.is_finite_non_zero());
- assert!(!test.is_denormal());
- assert!(test.bitwise_eq(expected));
-}
-
-#[test]
-fn zero() {
- assert_eq!(0.0, Single::from_f32(0.0).to_f32());
- assert_eq!(-0.0, Single::from_f32(-0.0).to_f32());
- assert!(Single::from_f32(-0.0).is_negative());
-
- assert_eq!(0.0, Double::from_f64(0.0).to_f64());
- assert_eq!(-0.0, Double::from_f64(-0.0).to_f64());
- assert!(Double::from_f64(-0.0).is_negative());
-
- fn test<T: Float>(sign: bool, bits: u128) {
- let test = if sign { -T::ZERO } else { T::ZERO };
- let pattern = if sign { "-0x0p+0" } else { "0x0p+0" };
- let expected = pattern.parse::<T>().unwrap();
- assert!(test.is_zero());
- assert_eq!(sign, test.is_negative());
- assert!(test.bitwise_eq(expected));
- assert_eq!(bits, test.to_bits());
- }
- test::<Half>(false, 0);
- test::<Half>(true, 0x8000);
- test::<Single>(false, 0);
- test::<Single>(true, 0x80000000);
- test::<Double>(false, 0);
- test::<Double>(true, 0x8000000000000000);
- test::<Quad>(false, 0);
- test::<Quad>(true, 0x8000000000000000_0000000000000000);
- test::<X87DoubleExtended>(false, 0);
- test::<X87DoubleExtended>(true, 0x8000_0000000000000000);
-}
-
-#[test]
-fn copy_sign() {
- assert!(
- Double::from_f64(-42.0)
- .bitwise_eq(Double::from_f64(42.0).copy_sign(Double::from_f64(-1.0)))
- );
- assert!(
- Double::from_f64(42.0)
- .bitwise_eq(Double::from_f64(-42.0).copy_sign(Double::from_f64(1.0)))
- );
- assert!(
- Double::from_f64(-42.0)
- .bitwise_eq(Double::from_f64(-42.0).copy_sign(Double::from_f64(-1.0)))
- );
- assert!(
- Double::from_f64(42.0)
- .bitwise_eq(Double::from_f64(42.0).copy_sign(Double::from_f64(1.0)))
- );
-}
-
-#[test]
-fn convert() {
- let mut loses_info = false;
- let test = "1.0".parse::<Double>().unwrap();
- let test: Single = test.convert(&mut loses_info).value;
- assert_eq!(1.0, test.to_f32());
- assert!(!loses_info);
-
- let mut test = "0x1p-53".parse::<X87DoubleExtended>().unwrap();
- let one = "1.0".parse::<X87DoubleExtended>().unwrap();
- test += one;
- let test: Double = test.convert(&mut loses_info).value;
- assert_eq!(1.0, test.to_f64());
- assert!(loses_info);
-
- let mut test = "0x1p-53".parse::<Quad>().unwrap();
- let one = "1.0".parse::<Quad>().unwrap();
- test += one;
- let test: Double = test.convert(&mut loses_info).value;
- assert_eq!(1.0, test.to_f64());
- assert!(loses_info);
-
- let test = "0xf.fffffffp+28".parse::<X87DoubleExtended>().unwrap();
- let test: Double = test.convert(&mut loses_info).value;
- assert_eq!(4294967295.0, test.to_f64());
- assert!(!loses_info);
-
- let test = Single::qnan(None);
- let x87_qnan = X87DoubleExtended::qnan(None);
- let test: X87DoubleExtended = test.convert(&mut loses_info).value;
- assert!(test.bitwise_eq(x87_qnan));
- assert!(!loses_info);
-
- let test = Single::snan(None);
- let sta = test.convert(&mut loses_info);
- let test: X87DoubleExtended = sta.value;
- assert!(test.is_nan());
- assert!(!test.is_signaling());
- assert!(!loses_info);
- assert_eq!(sta.status, Status::INVALID_OP);
-
- let test = X87DoubleExtended::qnan(None);
- let test: X87DoubleExtended = test.convert(&mut loses_info).value;
- assert!(test.bitwise_eq(x87_qnan));
- assert!(!loses_info);
-
- let test = X87DoubleExtended::snan(None);
- let sta = test.convert(&mut loses_info);
- let test: X87DoubleExtended = sta.value;
- assert!(test.is_nan());
- assert!(!test.is_signaling());
- assert!(!loses_info);
- assert_eq!(sta.status, Status::INVALID_OP);
-}
-
-#[test]
-fn is_negative() {
- let t = "0x1p+0".parse::<Single>().unwrap();
- assert!(!t.is_negative());
- let t = "-0x1p+0".parse::<Single>().unwrap();
- assert!(t.is_negative());
-
- assert!(!Single::INFINITY.is_negative());
- assert!((-Single::INFINITY).is_negative());
-
- assert!(!Single::ZERO.is_negative());
- assert!((-Single::ZERO).is_negative());
-
- assert!(!Single::NAN.is_negative());
- assert!((-Single::NAN).is_negative());
-
- assert!(!Single::snan(None).is_negative());
- assert!((-Single::snan(None)).is_negative());
-}
-
-#[test]
-fn is_normal() {
- let t = "0x1p+0".parse::<Single>().unwrap();
- assert!(t.is_normal());
-
- assert!(!Single::INFINITY.is_normal());
- assert!(!Single::ZERO.is_normal());
- assert!(!Single::NAN.is_normal());
- assert!(!Single::snan(None).is_normal());
- assert!(!"0x1p-149".parse::<Single>().unwrap().is_normal());
-}
-
-#[test]
-fn is_finite() {
- let t = "0x1p+0".parse::<Single>().unwrap();
- assert!(t.is_finite());
- assert!(!Single::INFINITY.is_finite());
- assert!(Single::ZERO.is_finite());
- assert!(!Single::NAN.is_finite());
- assert!(!Single::snan(None).is_finite());
- assert!("0x1p-149".parse::<Single>().unwrap().is_finite());
-}
-
-#[test]
-fn is_infinite() {
- let t = "0x1p+0".parse::<Single>().unwrap();
- assert!(!t.is_infinite());
- assert!(Single::INFINITY.is_infinite());
- assert!(!Single::ZERO.is_infinite());
- assert!(!Single::NAN.is_infinite());
- assert!(!Single::snan(None).is_infinite());
- assert!(!"0x1p-149".parse::<Single>().unwrap().is_infinite());
-}
-
-#[test]
-fn is_nan() {
- let t = "0x1p+0".parse::<Single>().unwrap();
- assert!(!t.is_nan());
- assert!(!Single::INFINITY.is_nan());
- assert!(!Single::ZERO.is_nan());
- assert!(Single::NAN.is_nan());
- assert!(Single::snan(None).is_nan());
- assert!(!"0x1p-149".parse::<Single>().unwrap().is_nan());
-}
-
-#[test]
-fn is_finite_non_zero() {
- // Test positive/negative normal value.
- assert!("0x1p+0".parse::<Single>().unwrap().is_finite_non_zero());
- assert!("-0x1p+0".parse::<Single>().unwrap().is_finite_non_zero());
-
- // Test positive/negative denormal value.
- assert!("0x1p-149".parse::<Single>().unwrap().is_finite_non_zero());
- assert!("-0x1p-149".parse::<Single>().unwrap().is_finite_non_zero());
-
- // Test +/- Infinity.
- assert!(!Single::INFINITY.is_finite_non_zero());
- assert!(!(-Single::INFINITY).is_finite_non_zero());
-
- // Test +/- Zero.
- assert!(!Single::ZERO.is_finite_non_zero());
- assert!(!(-Single::ZERO).is_finite_non_zero());
-
- // Test +/- qNaN. The sign doesn't mean anything for a qNaN, but paranoia can't hurt
- // in this instance.
- assert!(!Single::NAN.is_finite_non_zero());
- assert!(!(-Single::NAN).is_finite_non_zero());
-
- // Test +/- sNaN. The sign doesn't mean anything for an sNaN, but paranoia can't hurt
- // in this instance.
- assert!(!Single::snan(None).is_finite_non_zero());
- assert!(!(-Single::snan(None)).is_finite_non_zero());
-}
-
-#[test]
-fn add() {
- // Test Special Cases against each other and normal values.
-
- // FIXMES/NOTES:
- // 1. Since we perform only default exception handling, all operations with
- // signaling NaNs should have a result that is a quiet NaN. Currently they
- // return sNaN.
-
- let p_inf = Single::INFINITY;
- let m_inf = -Single::INFINITY;
- let p_zero = Single::ZERO;
- let m_zero = -Single::ZERO;
- let qnan = Single::NAN;
- let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
- let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
- let p_largest_value = Single::largest();
- let m_largest_value = -Single::largest();
- let p_smallest_value = Single::SMALLEST;
- let m_smallest_value = -Single::SMALLEST;
- let p_smallest_normalized = Single::smallest_normalized();
- let m_smallest_normalized = -Single::smallest_normalized();
-
- let overflow_status = Status::OVERFLOW | Status::INEXACT;
-
- let special_cases = [
- (p_inf, p_inf, "inf", Status::OK, Category::Infinity),
- (p_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (p_inf, p_zero, "inf", Status::OK, Category::Infinity),
- (p_inf, m_zero, "inf", Status::OK, Category::Infinity),
- (p_inf, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
- (p_inf, p_largest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_largest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, p_smallest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_smallest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, p_smallest_normalized, "inf", Status::OK, Category::Infinity),
- (p_inf, m_smallest_normalized, "inf", Status::OK, Category::Infinity),
- (m_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (m_inf, m_inf, "-inf", Status::OK, Category::Infinity),
- (m_inf, p_zero, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_zero, "-inf", Status::OK, Category::Infinity),
- (m_inf, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_inf, p_normal_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_normal_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, p_largest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_largest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, p_smallest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_smallest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, p_smallest_normalized, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_smallest_normalized, "-inf", Status::OK, Category::Infinity),
- (p_zero, p_inf, "inf", Status::OK, Category::Infinity),
- (p_zero, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_zero, p_normal_value, "0x1p+0", Status::OK, Category::Normal),
- (p_zero, m_normal_value, "-0x1p+0", Status::OK, Category::Normal),
- (p_zero, p_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (p_zero, m_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (p_zero, p_smallest_value, "0x1p-149", Status::OK, Category::Normal),
- (p_zero, m_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
- (p_zero, p_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
- (p_zero, m_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
- (m_zero, p_inf, "inf", Status::OK, Category::Infinity),
- (m_zero, m_inf, "-inf", Status::OK, Category::Infinity),
- (m_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_zero, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_zero, p_normal_value, "0x1p+0", Status::OK, Category::Normal),
- (m_zero, m_normal_value, "-0x1p+0", Status::OK, Category::Normal),
- (m_zero, p_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (m_zero, m_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (m_zero, p_smallest_value, "0x1p-149", Status::OK, Category::Normal),
- (m_zero, m_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
- (m_zero, p_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
- (m_zero, m_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
- (qnan, p_inf, "nan", Status::OK, Category::NaN),
- (qnan, m_inf, "nan", Status::OK, Category::NaN),
- (qnan, p_zero, "nan", Status::OK, Category::NaN),
- (qnan, m_zero, "nan", Status::OK, Category::NaN),
- (qnan, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (qnan, p_normal_value, "nan", Status::OK, Category::NaN),
- (qnan, m_normal_value, "nan", Status::OK, Category::NaN),
- (qnan, p_largest_value, "nan", Status::OK, Category::NaN),
- (qnan, m_largest_value, "nan", Status::OK, Category::NaN),
- (qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
- (qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
- (qnan, p_smallest_normalized, "nan", Status::OK, Category::NaN),
- (qnan, m_smallest_normalized, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
- (snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
- (snan, snan, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_normal_value, p_inf, "inf", Status::OK, Category::Infinity),
- (p_normal_value, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_normal_value, p_zero, "0x1p+0", Status::OK, Category::Normal),
- (p_normal_value, m_zero, "0x1p+0", Status::OK, Category::Normal),
- (p_normal_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_normal_value, p_normal_value, "0x1p+1", Status::OK, Category::Normal),
- (p_normal_value, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
- (p_normal_value, p_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_normal_value, m_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_normal_value, p_smallest_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (p_normal_value, m_smallest_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (p_normal_value, p_smallest_normalized, "0x1p+0", Status::INEXACT, Category::Normal),
- (p_normal_value, m_smallest_normalized, "0x1p+0", Status::INEXACT, Category::Normal),
- (m_normal_value, p_inf, "inf", Status::OK, Category::Infinity),
- (m_normal_value, m_inf, "-inf", Status::OK, Category::Infinity),
- (m_normal_value, p_zero, "-0x1p+0", Status::OK, Category::Normal),
- (m_normal_value, m_zero, "-0x1p+0", Status::OK, Category::Normal),
- (m_normal_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_normal_value, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
- (m_normal_value, m_normal_value, "-0x1p+1", Status::OK, Category::Normal),
- (m_normal_value, p_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_normal_value, m_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_normal_value, p_smallest_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (m_normal_value, m_smallest_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (m_normal_value, p_smallest_normalized, "-0x1p+0", Status::INEXACT, Category::Normal),
- (m_normal_value, m_smallest_normalized, "-0x1p+0", Status::INEXACT, Category::Normal),
- (p_largest_value, p_inf, "inf", Status::OK, Category::Infinity),
- (p_largest_value, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_largest_value, p_zero, "0x1.fffffep+127", Status::OK, Category::Normal),
- (p_largest_value, m_zero, "0x1.fffffep+127", Status::OK, Category::Normal),
- (p_largest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_largest_value, p_normal_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_largest_value, m_normal_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_largest_value, p_largest_value, "inf", overflow_status, Category::Infinity),
- (p_largest_value, m_largest_value, "0x0p+0", Status::OK, Category::Zero),
- (p_largest_value, p_smallest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_largest_value, m_smallest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (
- p_largest_value,
- p_smallest_normalized,
- "0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (
- p_largest_value,
- m_smallest_normalized,
- "0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (m_largest_value, p_inf, "inf", Status::OK, Category::Infinity),
- (m_largest_value, m_inf, "-inf", Status::OK, Category::Infinity),
- (m_largest_value, p_zero, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (m_largest_value, m_zero, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (m_largest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_largest_value, p_normal_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_largest_value, m_normal_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_largest_value, p_largest_value, "0x0p+0", Status::OK, Category::Zero),
- (m_largest_value, m_largest_value, "-inf", overflow_status, Category::Infinity),
- (m_largest_value, p_smallest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_largest_value, m_smallest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (
- m_largest_value,
- p_smallest_normalized,
- "-0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (
- m_largest_value,
- m_smallest_normalized,
- "-0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (p_smallest_value, p_inf, "inf", Status::OK, Category::Infinity),
- (p_smallest_value, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_smallest_value, p_zero, "0x1p-149", Status::OK, Category::Normal),
- (p_smallest_value, m_zero, "0x1p-149", Status::OK, Category::Normal),
- (p_smallest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_smallest_value, p_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (p_smallest_value, m_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (p_smallest_value, p_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_smallest_value, m_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_smallest_value, p_smallest_value, "0x1p-148", Status::OK, Category::Normal),
- (p_smallest_value, m_smallest_value, "0x0p+0", Status::OK, Category::Zero),
- (p_smallest_value, p_smallest_normalized, "0x1.000002p-126", Status::OK, Category::Normal),
- (p_smallest_value, m_smallest_normalized, "-0x1.fffffcp-127", Status::OK, Category::Normal),
- (m_smallest_value, p_inf, "inf", Status::OK, Category::Infinity),
- (m_smallest_value, m_inf, "-inf", Status::OK, Category::Infinity),
- (m_smallest_value, p_zero, "-0x1p-149", Status::OK, Category::Normal),
- (m_smallest_value, m_zero, "-0x1p-149", Status::OK, Category::Normal),
- (m_smallest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_smallest_value, p_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (m_smallest_value, m_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (m_smallest_value, p_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_smallest_value, m_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_smallest_value, p_smallest_value, "0x0p+0", Status::OK, Category::Zero),
- (m_smallest_value, m_smallest_value, "-0x1p-148", Status::OK, Category::Normal),
- (m_smallest_value, p_smallest_normalized, "0x1.fffffcp-127", Status::OK, Category::Normal),
- (m_smallest_value, m_smallest_normalized, "-0x1.000002p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, p_inf, "inf", Status::OK, Category::Infinity),
- (p_smallest_normalized, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_smallest_normalized, p_zero, "0x1p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, m_zero, "0x1p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_smallest_normalized, p_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (p_smallest_normalized, m_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (
- p_smallest_normalized,
- p_largest_value,
- "0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (
- p_smallest_normalized,
- m_largest_value,
- "-0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (p_smallest_normalized, p_smallest_value, "0x1.000002p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, m_smallest_value, "0x1.fffffcp-127", Status::OK, Category::Normal),
- (p_smallest_normalized, p_smallest_normalized, "0x1p-125", Status::OK, Category::Normal),
- (p_smallest_normalized, m_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
- (m_smallest_normalized, p_inf, "inf", Status::OK, Category::Infinity),
- (m_smallest_normalized, m_inf, "-inf", Status::OK, Category::Infinity),
- (m_smallest_normalized, p_zero, "-0x1p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, m_zero, "-0x1p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_smallest_normalized, p_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (m_smallest_normalized, m_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (
- m_smallest_normalized,
- p_largest_value,
- "0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (
- m_smallest_normalized,
- m_largest_value,
- "-0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (m_smallest_normalized, p_smallest_value, "-0x1.fffffcp-127", Status::OK, Category::Normal),
- (m_smallest_normalized, m_smallest_value, "-0x1.000002p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, p_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
- (m_smallest_normalized, m_smallest_normalized, "-0x1p-125", Status::OK, Category::Normal),
- ];
-
- for (x, y, e_result, e_status, e_category) in special_cases {
- let status;
- let result = unpack!(status=, x + y);
- assert_eq!(status, e_status);
- assert_eq!(result.category(), e_category);
- assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
- }
-}
-
-#[test]
-fn subtract() {
- // Test Special Cases against each other and normal values.
-
- // FIXMES/NOTES:
- // 1. Since we perform only default exception handling, all operations with
- // signaling NaNs should have a result that is a quiet NaN. Currently they
- // return sNaN.
-
- let p_inf = Single::INFINITY;
- let m_inf = -Single::INFINITY;
- let p_zero = Single::ZERO;
- let m_zero = -Single::ZERO;
- let qnan = Single::NAN;
- let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
- let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
- let p_largest_value = Single::largest();
- let m_largest_value = -Single::largest();
- let p_smallest_value = Single::SMALLEST;
- let m_smallest_value = -Single::SMALLEST;
- let p_smallest_normalized = Single::smallest_normalized();
- let m_smallest_normalized = -Single::smallest_normalized();
-
- let overflow_status = Status::OVERFLOW | Status::INEXACT;
-
- let special_cases = [
- (p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (p_inf, m_inf, "inf", Status::OK, Category::Infinity),
- (p_inf, p_zero, "inf", Status::OK, Category::Infinity),
- (p_inf, m_zero, "inf", Status::OK, Category::Infinity),
- (p_inf, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_inf, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
- (p_inf, p_largest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_largest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, p_smallest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_smallest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, p_smallest_normalized, "inf", Status::OK, Category::Infinity),
- (p_inf, m_smallest_normalized, "inf", Status::OK, Category::Infinity),
- (m_inf, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (m_inf, p_zero, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_zero, "-inf", Status::OK, Category::Infinity),
- (m_inf, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_inf, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (m_inf, p_normal_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_normal_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, p_largest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_largest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, p_smallest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_smallest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, p_smallest_normalized, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_smallest_normalized, "-inf", Status::OK, Category::Infinity),
- (p_zero, p_inf, "-inf", Status::OK, Category::Infinity),
- (p_zero, m_inf, "inf", Status::OK, Category::Infinity),
- (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_zero, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (p_zero, p_normal_value, "-0x1p+0", Status::OK, Category::Normal),
- (p_zero, m_normal_value, "0x1p+0", Status::OK, Category::Normal),
- (p_zero, p_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (p_zero, m_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (p_zero, p_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
- (p_zero, m_smallest_value, "0x1p-149", Status::OK, Category::Normal),
- (p_zero, p_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
- (p_zero, m_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
- (m_zero, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_zero, m_inf, "inf", Status::OK, Category::Infinity),
- (m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_zero, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (m_zero, p_normal_value, "-0x1p+0", Status::OK, Category::Normal),
- (m_zero, m_normal_value, "0x1p+0", Status::OK, Category::Normal),
- (m_zero, p_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (m_zero, m_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (m_zero, p_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
- (m_zero, m_smallest_value, "0x1p-149", Status::OK, Category::Normal),
- (m_zero, p_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
- (m_zero, m_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
- (qnan, p_inf, "nan", Status::OK, Category::NaN),
- (qnan, m_inf, "nan", Status::OK, Category::NaN),
- (qnan, p_zero, "nan", Status::OK, Category::NaN),
- (qnan, m_zero, "nan", Status::OK, Category::NaN),
- (qnan, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (qnan, p_normal_value, "nan", Status::OK, Category::NaN),
- (qnan, m_normal_value, "nan", Status::OK, Category::NaN),
- (qnan, p_largest_value, "nan", Status::OK, Category::NaN),
- (qnan, m_largest_value, "nan", Status::OK, Category::NaN),
- (qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
- (qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
- (qnan, p_smallest_normalized, "nan", Status::OK, Category::NaN),
- (qnan, m_smallest_normalized, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
- (snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
- (snan, snan, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_normal_value, p_inf, "-inf", Status::OK, Category::Infinity),
- (p_normal_value, m_inf, "inf", Status::OK, Category::Infinity),
- (p_normal_value, p_zero, "0x1p+0", Status::OK, Category::Normal),
- (p_normal_value, m_zero, "0x1p+0", Status::OK, Category::Normal),
- (p_normal_value, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (p_normal_value, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
- (p_normal_value, m_normal_value, "0x1p+1", Status::OK, Category::Normal),
- (p_normal_value, p_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_normal_value, m_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_normal_value, p_smallest_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (p_normal_value, m_smallest_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (p_normal_value, p_smallest_normalized, "0x1p+0", Status::INEXACT, Category::Normal),
- (p_normal_value, m_smallest_normalized, "0x1p+0", Status::INEXACT, Category::Normal),
- (m_normal_value, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_normal_value, m_inf, "inf", Status::OK, Category::Infinity),
- (m_normal_value, p_zero, "-0x1p+0", Status::OK, Category::Normal),
- (m_normal_value, m_zero, "-0x1p+0", Status::OK, Category::Normal),
- (m_normal_value, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (m_normal_value, p_normal_value, "-0x1p+1", Status::OK, Category::Normal),
- (m_normal_value, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
- (m_normal_value, p_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_normal_value, m_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_normal_value, p_smallest_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (m_normal_value, m_smallest_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (m_normal_value, p_smallest_normalized, "-0x1p+0", Status::INEXACT, Category::Normal),
- (m_normal_value, m_smallest_normalized, "-0x1p+0", Status::INEXACT, Category::Normal),
- (p_largest_value, p_inf, "-inf", Status::OK, Category::Infinity),
- (p_largest_value, m_inf, "inf", Status::OK, Category::Infinity),
- (p_largest_value, p_zero, "0x1.fffffep+127", Status::OK, Category::Normal),
- (p_largest_value, m_zero, "0x1.fffffep+127", Status::OK, Category::Normal),
- (p_largest_value, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (p_largest_value, p_normal_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_largest_value, m_normal_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_largest_value, p_largest_value, "0x0p+0", Status::OK, Category::Zero),
- (p_largest_value, m_largest_value, "inf", overflow_status, Category::Infinity),
- (p_largest_value, p_smallest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_largest_value, m_smallest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (
- p_largest_value,
- p_smallest_normalized,
- "0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (
- p_largest_value,
- m_smallest_normalized,
- "0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (m_largest_value, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_largest_value, m_inf, "inf", Status::OK, Category::Infinity),
- (m_largest_value, p_zero, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (m_largest_value, m_zero, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (m_largest_value, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (m_largest_value, p_normal_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_largest_value, m_normal_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_largest_value, p_largest_value, "-inf", overflow_status, Category::Infinity),
- (m_largest_value, m_largest_value, "0x0p+0", Status::OK, Category::Zero),
- (m_largest_value, p_smallest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_largest_value, m_smallest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (
- m_largest_value,
- p_smallest_normalized,
- "-0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (
- m_largest_value,
- m_smallest_normalized,
- "-0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (p_smallest_value, p_inf, "-inf", Status::OK, Category::Infinity),
- (p_smallest_value, m_inf, "inf", Status::OK, Category::Infinity),
- (p_smallest_value, p_zero, "0x1p-149", Status::OK, Category::Normal),
- (p_smallest_value, m_zero, "0x1p-149", Status::OK, Category::Normal),
- (p_smallest_value, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (p_smallest_value, p_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (p_smallest_value, m_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (p_smallest_value, p_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_smallest_value, m_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (p_smallest_value, p_smallest_value, "0x0p+0", Status::OK, Category::Zero),
- (p_smallest_value, m_smallest_value, "0x1p-148", Status::OK, Category::Normal),
- (p_smallest_value, p_smallest_normalized, "-0x1.fffffcp-127", Status::OK, Category::Normal),
- (p_smallest_value, m_smallest_normalized, "0x1.000002p-126", Status::OK, Category::Normal),
- (m_smallest_value, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_smallest_value, m_inf, "inf", Status::OK, Category::Infinity),
- (m_smallest_value, p_zero, "-0x1p-149", Status::OK, Category::Normal),
- (m_smallest_value, m_zero, "-0x1p-149", Status::OK, Category::Normal),
- (m_smallest_value, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (m_smallest_value, p_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (m_smallest_value, m_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (m_smallest_value, p_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_smallest_value, m_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
- (m_smallest_value, p_smallest_value, "-0x1p-148", Status::OK, Category::Normal),
- (m_smallest_value, m_smallest_value, "0x0p+0", Status::OK, Category::Zero),
- (m_smallest_value, p_smallest_normalized, "-0x1.000002p-126", Status::OK, Category::Normal),
- (m_smallest_value, m_smallest_normalized, "0x1.fffffcp-127", Status::OK, Category::Normal),
- (p_smallest_normalized, p_inf, "-inf", Status::OK, Category::Infinity),
- (p_smallest_normalized, m_inf, "inf", Status::OK, Category::Infinity),
- (p_smallest_normalized, p_zero, "0x1p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, m_zero, "0x1p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (p_smallest_normalized, p_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (p_smallest_normalized, m_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (
- p_smallest_normalized,
- p_largest_value,
- "-0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (
- p_smallest_normalized,
- m_largest_value,
- "0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (p_smallest_normalized, p_smallest_value, "0x1.fffffcp-127", Status::OK, Category::Normal),
- (p_smallest_normalized, m_smallest_value, "0x1.000002p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, p_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
- (p_smallest_normalized, m_smallest_normalized, "0x1p-125", Status::OK, Category::Normal),
- (m_smallest_normalized, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_smallest_normalized, m_inf, "inf", Status::OK, Category::Infinity),
- (m_smallest_normalized, p_zero, "-0x1p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, m_zero, "-0x1p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, qnan, "-nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN),
- */
- (m_smallest_normalized, p_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
- (m_smallest_normalized, m_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
- (
- m_smallest_normalized,
- p_largest_value,
- "-0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (
- m_smallest_normalized,
- m_largest_value,
- "0x1.fffffep+127",
- Status::INEXACT,
- Category::Normal,
- ),
- (m_smallest_normalized, p_smallest_value, "-0x1.000002p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, m_smallest_value, "-0x1.fffffcp-127", Status::OK, Category::Normal),
- (m_smallest_normalized, p_smallest_normalized, "-0x1p-125", Status::OK, Category::Normal),
- (m_smallest_normalized, m_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
- ];
-
- for (x, y, e_result, e_status, e_category) in special_cases {
- let status;
- let result = unpack!(status=, x - y);
- assert_eq!(status, e_status);
- assert_eq!(result.category(), e_category);
- assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
- }
-}
-
-#[test]
-fn multiply() {
- // Test Special Cases against each other and normal values.
-
- // FIXMES/NOTES:
- // 1. Since we perform only default exception handling, all operations with
- // signaling NaNs should have a result that is a quiet NaN. Currently they
- // return sNaN.
-
- let p_inf = Single::INFINITY;
- let m_inf = -Single::INFINITY;
- let p_zero = Single::ZERO;
- let m_zero = -Single::ZERO;
- let qnan = Single::NAN;
- let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
- let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
- let p_largest_value = Single::largest();
- let m_largest_value = -Single::largest();
- let p_smallest_value = Single::SMALLEST;
- let m_smallest_value = -Single::SMALLEST;
- let p_smallest_normalized = Single::smallest_normalized();
- let m_smallest_normalized = -Single::smallest_normalized();
-
- let overflow_status = Status::OVERFLOW | Status::INEXACT;
- let underflow_status = Status::UNDERFLOW | Status::INEXACT;
-
- let special_cases = [
- (p_inf, p_inf, "inf", Status::OK, Category::Infinity),
- (p_inf, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN),
- (p_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN),
- (p_inf, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_normal_value, "-inf", Status::OK, Category::Infinity),
- (p_inf, p_largest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_largest_value, "-inf", Status::OK, Category::Infinity),
- (p_inf, p_smallest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_smallest_value, "-inf", Status::OK, Category::Infinity),
- (p_inf, p_smallest_normalized, "inf", Status::OK, Category::Infinity),
- (p_inf, m_smallest_normalized, "-inf", Status::OK, Category::Infinity),
- (m_inf, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_inf, "inf", Status::OK, Category::Infinity),
- (m_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN),
- (m_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN),
- (m_inf, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_inf, p_normal_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
- (m_inf, p_largest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_largest_value, "inf", Status::OK, Category::Infinity),
- (m_inf, p_smallest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_smallest_value, "inf", Status::OK, Category::Infinity),
- (m_inf, p_smallest_normalized, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_smallest_normalized, "inf", Status::OK, Category::Infinity),
- (p_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (p_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_zero, "-0x0p+0", Status::OK, Category::Zero),
- (p_zero, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_normal_value, "-0x0p+0", Status::OK, Category::Zero),
- (p_zero, p_largest_value, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_largest_value, "-0x0p+0", Status::OK, Category::Zero),
- (p_zero, p_smallest_value, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
- (p_zero, p_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (m_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_zero, p_normal_value, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, p_largest_value, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_largest_value, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, p_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_smallest_value, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, p_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
- (qnan, p_inf, "nan", Status::OK, Category::NaN),
- (qnan, m_inf, "nan", Status::OK, Category::NaN),
- (qnan, p_zero, "nan", Status::OK, Category::NaN),
- (qnan, m_zero, "nan", Status::OK, Category::NaN),
- (qnan, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (qnan, p_normal_value, "nan", Status::OK, Category::NaN),
- (qnan, m_normal_value, "nan", Status::OK, Category::NaN),
- (qnan, p_largest_value, "nan", Status::OK, Category::NaN),
- (qnan, m_largest_value, "nan", Status::OK, Category::NaN),
- (qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
- (qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
- (qnan, p_smallest_normalized, "nan", Status::OK, Category::NaN),
- (qnan, m_smallest_normalized, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
- (snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
- (snan, snan, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_normal_value, p_inf, "inf", Status::OK, Category::Infinity),
- (p_normal_value, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_normal_value, p_zero, "0x0p+0", Status::OK, Category::Zero),
- (p_normal_value, m_zero, "-0x0p+0", Status::OK, Category::Zero),
- (p_normal_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_normal_value, p_normal_value, "0x1p+0", Status::OK, Category::Normal),
- (p_normal_value, m_normal_value, "-0x1p+0", Status::OK, Category::Normal),
- (p_normal_value, p_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (p_normal_value, m_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (p_normal_value, p_smallest_value, "0x1p-149", Status::OK, Category::Normal),
- (p_normal_value, m_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
- (p_normal_value, p_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
- (p_normal_value, m_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
- (m_normal_value, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_normal_value, m_inf, "inf", Status::OK, Category::Infinity),
- (m_normal_value, p_zero, "-0x0p+0", Status::OK, Category::Zero),
- (m_normal_value, m_zero, "0x0p+0", Status::OK, Category::Zero),
- (m_normal_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_normal_value, p_normal_value, "-0x1p+0", Status::OK, Category::Normal),
- (m_normal_value, m_normal_value, "0x1p+0", Status::OK, Category::Normal),
- (m_normal_value, p_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (m_normal_value, m_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (m_normal_value, p_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
- (m_normal_value, m_smallest_value, "0x1p-149", Status::OK, Category::Normal),
- (m_normal_value, p_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
- (m_normal_value, m_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
- (p_largest_value, p_inf, "inf", Status::OK, Category::Infinity),
- (p_largest_value, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_largest_value, p_zero, "0x0p+0", Status::OK, Category::Zero),
- (p_largest_value, m_zero, "-0x0p+0", Status::OK, Category::Zero),
- (p_largest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_largest_value, p_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (p_largest_value, m_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (p_largest_value, p_largest_value, "inf", overflow_status, Category::Infinity),
- (p_largest_value, m_largest_value, "-inf", overflow_status, Category::Infinity),
- (p_largest_value, p_smallest_value, "0x1.fffffep-22", Status::OK, Category::Normal),
- (p_largest_value, m_smallest_value, "-0x1.fffffep-22", Status::OK, Category::Normal),
- (p_largest_value, p_smallest_normalized, "0x1.fffffep+1", Status::OK, Category::Normal),
- (p_largest_value, m_smallest_normalized, "-0x1.fffffep+1", Status::OK, Category::Normal),
- (m_largest_value, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_largest_value, m_inf, "inf", Status::OK, Category::Infinity),
- (m_largest_value, p_zero, "-0x0p+0", Status::OK, Category::Zero),
- (m_largest_value, m_zero, "0x0p+0", Status::OK, Category::Zero),
- (m_largest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_largest_value, p_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (m_largest_value, m_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (m_largest_value, p_largest_value, "-inf", overflow_status, Category::Infinity),
- (m_largest_value, m_largest_value, "inf", overflow_status, Category::Infinity),
- (m_largest_value, p_smallest_value, "-0x1.fffffep-22", Status::OK, Category::Normal),
- (m_largest_value, m_smallest_value, "0x1.fffffep-22", Status::OK, Category::Normal),
- (m_largest_value, p_smallest_normalized, "-0x1.fffffep+1", Status::OK, Category::Normal),
- (m_largest_value, m_smallest_normalized, "0x1.fffffep+1", Status::OK, Category::Normal),
- (p_smallest_value, p_inf, "inf", Status::OK, Category::Infinity),
- (p_smallest_value, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_smallest_value, p_zero, "0x0p+0", Status::OK, Category::Zero),
- (p_smallest_value, m_zero, "-0x0p+0", Status::OK, Category::Zero),
- (p_smallest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_smallest_value, p_normal_value, "0x1p-149", Status::OK, Category::Normal),
- (p_smallest_value, m_normal_value, "-0x1p-149", Status::OK, Category::Normal),
- (p_smallest_value, p_largest_value, "0x1.fffffep-22", Status::OK, Category::Normal),
- (p_smallest_value, m_largest_value, "-0x1.fffffep-22", Status::OK, Category::Normal),
- (p_smallest_value, p_smallest_value, "0x0p+0", underflow_status, Category::Zero),
- (p_smallest_value, m_smallest_value, "-0x0p+0", underflow_status, Category::Zero),
- (p_smallest_value, p_smallest_normalized, "0x0p+0", underflow_status, Category::Zero),
- (p_smallest_value, m_smallest_normalized, "-0x0p+0", underflow_status, Category::Zero),
- (m_smallest_value, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_smallest_value, m_inf, "inf", Status::OK, Category::Infinity),
- (m_smallest_value, p_zero, "-0x0p+0", Status::OK, Category::Zero),
- (m_smallest_value, m_zero, "0x0p+0", Status::OK, Category::Zero),
- (m_smallest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_smallest_value, p_normal_value, "-0x1p-149", Status::OK, Category::Normal),
- (m_smallest_value, m_normal_value, "0x1p-149", Status::OK, Category::Normal),
- (m_smallest_value, p_largest_value, "-0x1.fffffep-22", Status::OK, Category::Normal),
- (m_smallest_value, m_largest_value, "0x1.fffffep-22", Status::OK, Category::Normal),
- (m_smallest_value, p_smallest_value, "-0x0p+0", underflow_status, Category::Zero),
- (m_smallest_value, m_smallest_value, "0x0p+0", underflow_status, Category::Zero),
- (m_smallest_value, p_smallest_normalized, "-0x0p+0", underflow_status, Category::Zero),
- (m_smallest_value, m_smallest_normalized, "0x0p+0", underflow_status, Category::Zero),
- (p_smallest_normalized, p_inf, "inf", Status::OK, Category::Infinity),
- (p_smallest_normalized, m_inf, "-inf", Status::OK, Category::Infinity),
- (p_smallest_normalized, p_zero, "0x0p+0", Status::OK, Category::Zero),
- (p_smallest_normalized, m_zero, "-0x0p+0", Status::OK, Category::Zero),
- (p_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_smallest_normalized, p_normal_value, "0x1p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, m_normal_value, "-0x1p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, p_largest_value, "0x1.fffffep+1", Status::OK, Category::Normal),
- (p_smallest_normalized, m_largest_value, "-0x1.fffffep+1", Status::OK, Category::Normal),
- (p_smallest_normalized, p_smallest_value, "0x0p+0", underflow_status, Category::Zero),
- (p_smallest_normalized, m_smallest_value, "-0x0p+0", underflow_status, Category::Zero),
- (p_smallest_normalized, p_smallest_normalized, "0x0p+0", underflow_status, Category::Zero),
- (p_smallest_normalized, m_smallest_normalized, "-0x0p+0", underflow_status, Category::Zero),
- (m_smallest_normalized, p_inf, "-inf", Status::OK, Category::Infinity),
- (m_smallest_normalized, m_inf, "inf", Status::OK, Category::Infinity),
- (m_smallest_normalized, p_zero, "-0x0p+0", Status::OK, Category::Zero),
- (m_smallest_normalized, m_zero, "0x0p+0", Status::OK, Category::Zero),
- (m_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_smallest_normalized, p_normal_value, "-0x1p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, m_normal_value, "0x1p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, p_largest_value, "-0x1.fffffep+1", Status::OK, Category::Normal),
- (m_smallest_normalized, m_largest_value, "0x1.fffffep+1", Status::OK, Category::Normal),
- (m_smallest_normalized, p_smallest_value, "-0x0p+0", underflow_status, Category::Zero),
- (m_smallest_normalized, m_smallest_value, "0x0p+0", underflow_status, Category::Zero),
- (m_smallest_normalized, p_smallest_normalized, "-0x0p+0", underflow_status, Category::Zero),
- (m_smallest_normalized, m_smallest_normalized, "0x0p+0", underflow_status, Category::Zero),
- ];
-
- for (x, y, e_result, e_status, e_category) in special_cases {
- let status;
- let result = unpack!(status=, x * y);
- assert_eq!(status, e_status);
- assert_eq!(result.category(), e_category);
- assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
- }
-}
-
-#[test]
-fn divide() {
- // Test Special Cases against each other and normal values.
-
- // FIXMES/NOTES:
- // 1. Since we perform only default exception handling, all operations with
- // signaling NaNs should produce a quiet NaN result. Currently they
- // return an sNaN.
-
- let p_inf = Single::INFINITY;
- let m_inf = -Single::INFINITY;
- let p_zero = Single::ZERO;
- let m_zero = -Single::ZERO;
- let qnan = Single::NAN;
- let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
- let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
- let p_largest_value = Single::largest();
- let m_largest_value = -Single::largest();
- let p_smallest_value = Single::SMALLEST;
- let m_smallest_value = -Single::SMALLEST;
- let p_smallest_normalized = Single::smallest_normalized();
- let m_smallest_normalized = -Single::smallest_normalized();
-
- let overflow_status = Status::OVERFLOW | Status::INEXACT;
- let underflow_status = Status::UNDERFLOW | Status::INEXACT;
-
- let special_cases = [
- (p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (p_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (p_inf, p_zero, "inf", Status::OK, Category::Infinity),
- (p_inf, m_zero, "-inf", Status::OK, Category::Infinity),
- (p_inf, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_normal_value, "-inf", Status::OK, Category::Infinity),
- (p_inf, p_largest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_largest_value, "-inf", Status::OK, Category::Infinity),
- (p_inf, p_smallest_value, "inf", Status::OK, Category::Infinity),
- (p_inf, m_smallest_value, "-inf", Status::OK, Category::Infinity),
- (p_inf, p_smallest_normalized, "inf", Status::OK, Category::Infinity),
- (p_inf, m_smallest_normalized, "-inf", Status::OK, Category::Infinity),
- (m_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (m_inf, p_zero, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_zero, "inf", Status::OK, Category::Infinity),
- (m_inf, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_inf, p_normal_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
- (m_inf, p_largest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_largest_value, "inf", Status::OK, Category::Infinity),
- (m_inf, p_smallest_value, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_smallest_value, "inf", Status::OK, Category::Infinity),
- (m_inf, p_smallest_normalized, "-inf", Status::OK, Category::Infinity),
- (m_inf, m_smallest_normalized, "inf", Status::OK, Category::Infinity),
- (p_zero, p_inf, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_inf, "-0x0p+0", Status::OK, Category::Zero),
- (p_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN),
- (p_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN),
- (p_zero, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_normal_value, "-0x0p+0", Status::OK, Category::Zero),
- (p_zero, p_largest_value, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_largest_value, "-0x0p+0", Status::OK, Category::Zero),
- (p_zero, p_smallest_value, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
- (p_zero, p_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
- (p_zero, m_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, p_inf, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_inf, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN),
- (m_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN),
- (m_zero, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_zero, p_normal_value, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, p_largest_value, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_largest_value, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, p_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_smallest_value, "0x0p+0", Status::OK, Category::Zero),
- (m_zero, p_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
- (m_zero, m_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
- (qnan, p_inf, "nan", Status::OK, Category::NaN),
- (qnan, m_inf, "nan", Status::OK, Category::NaN),
- (qnan, p_zero, "nan", Status::OK, Category::NaN),
- (qnan, m_zero, "nan", Status::OK, Category::NaN),
- (qnan, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (qnan, p_normal_value, "nan", Status::OK, Category::NaN),
- (qnan, m_normal_value, "nan", Status::OK, Category::NaN),
- (qnan, p_largest_value, "nan", Status::OK, Category::NaN),
- (qnan, m_largest_value, "nan", Status::OK, Category::NaN),
- (qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
- (qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
- (qnan, p_smallest_normalized, "nan", Status::OK, Category::NaN),
- (qnan, m_smallest_normalized, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
- (snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
- (snan, snan, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
- (snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
- (snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_normal_value, p_inf, "0x0p+0", Status::OK, Category::Zero),
- (p_normal_value, m_inf, "-0x0p+0", Status::OK, Category::Zero),
- (p_normal_value, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
- (p_normal_value, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
- (p_normal_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_normal_value, p_normal_value, "0x1p+0", Status::OK, Category::Normal),
- (p_normal_value, m_normal_value, "-0x1p+0", Status::OK, Category::Normal),
- (p_normal_value, p_largest_value, "0x1p-128", underflow_status, Category::Normal),
- (p_normal_value, m_largest_value, "-0x1p-128", underflow_status, Category::Normal),
- (p_normal_value, p_smallest_value, "inf", overflow_status, Category::Infinity),
- (p_normal_value, m_smallest_value, "-inf", overflow_status, Category::Infinity),
- (p_normal_value, p_smallest_normalized, "0x1p+126", Status::OK, Category::Normal),
- (p_normal_value, m_smallest_normalized, "-0x1p+126", Status::OK, Category::Normal),
- (m_normal_value, p_inf, "-0x0p+0", Status::OK, Category::Zero),
- (m_normal_value, m_inf, "0x0p+0", Status::OK, Category::Zero),
- (m_normal_value, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
- (m_normal_value, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
- (m_normal_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_normal_value, p_normal_value, "-0x1p+0", Status::OK, Category::Normal),
- (m_normal_value, m_normal_value, "0x1p+0", Status::OK, Category::Normal),
- (m_normal_value, p_largest_value, "-0x1p-128", underflow_status, Category::Normal),
- (m_normal_value, m_largest_value, "0x1p-128", underflow_status, Category::Normal),
- (m_normal_value, p_smallest_value, "-inf", overflow_status, Category::Infinity),
- (m_normal_value, m_smallest_value, "inf", overflow_status, Category::Infinity),
- (m_normal_value, p_smallest_normalized, "-0x1p+126", Status::OK, Category::Normal),
- (m_normal_value, m_smallest_normalized, "0x1p+126", Status::OK, Category::Normal),
- (p_largest_value, p_inf, "0x0p+0", Status::OK, Category::Zero),
- (p_largest_value, m_inf, "-0x0p+0", Status::OK, Category::Zero),
- (p_largest_value, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
- (p_largest_value, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
- (p_largest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_largest_value, p_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (p_largest_value, m_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (p_largest_value, p_largest_value, "0x1p+0", Status::OK, Category::Normal),
- (p_largest_value, m_largest_value, "-0x1p+0", Status::OK, Category::Normal),
- (p_largest_value, p_smallest_value, "inf", overflow_status, Category::Infinity),
- (p_largest_value, m_smallest_value, "-inf", overflow_status, Category::Infinity),
- (p_largest_value, p_smallest_normalized, "inf", overflow_status, Category::Infinity),
- (p_largest_value, m_smallest_normalized, "-inf", overflow_status, Category::Infinity),
- (m_largest_value, p_inf, "-0x0p+0", Status::OK, Category::Zero),
- (m_largest_value, m_inf, "0x0p+0", Status::OK, Category::Zero),
- (m_largest_value, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
- (m_largest_value, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
- (m_largest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_largest_value, p_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
- (m_largest_value, m_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
- (m_largest_value, p_largest_value, "-0x1p+0", Status::OK, Category::Normal),
- (m_largest_value, m_largest_value, "0x1p+0", Status::OK, Category::Normal),
- (m_largest_value, p_smallest_value, "-inf", overflow_status, Category::Infinity),
- (m_largest_value, m_smallest_value, "inf", overflow_status, Category::Infinity),
- (m_largest_value, p_smallest_normalized, "-inf", overflow_status, Category::Infinity),
- (m_largest_value, m_smallest_normalized, "inf", overflow_status, Category::Infinity),
- (p_smallest_value, p_inf, "0x0p+0", Status::OK, Category::Zero),
- (p_smallest_value, m_inf, "-0x0p+0", Status::OK, Category::Zero),
- (p_smallest_value, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
- (p_smallest_value, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
- (p_smallest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_smallest_value, p_normal_value, "0x1p-149", Status::OK, Category::Normal),
- (p_smallest_value, m_normal_value, "-0x1p-149", Status::OK, Category::Normal),
- (p_smallest_value, p_largest_value, "0x0p+0", underflow_status, Category::Zero),
- (p_smallest_value, m_largest_value, "-0x0p+0", underflow_status, Category::Zero),
- (p_smallest_value, p_smallest_value, "0x1p+0", Status::OK, Category::Normal),
- (p_smallest_value, m_smallest_value, "-0x1p+0", Status::OK, Category::Normal),
- (p_smallest_value, p_smallest_normalized, "0x1p-23", Status::OK, Category::Normal),
- (p_smallest_value, m_smallest_normalized, "-0x1p-23", Status::OK, Category::Normal),
- (m_smallest_value, p_inf, "-0x0p+0", Status::OK, Category::Zero),
- (m_smallest_value, m_inf, "0x0p+0", Status::OK, Category::Zero),
- (m_smallest_value, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
- (m_smallest_value, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
- (m_smallest_value, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_smallest_value, p_normal_value, "-0x1p-149", Status::OK, Category::Normal),
- (m_smallest_value, m_normal_value, "0x1p-149", Status::OK, Category::Normal),
- (m_smallest_value, p_largest_value, "-0x0p+0", underflow_status, Category::Zero),
- (m_smallest_value, m_largest_value, "0x0p+0", underflow_status, Category::Zero),
- (m_smallest_value, p_smallest_value, "-0x1p+0", Status::OK, Category::Normal),
- (m_smallest_value, m_smallest_value, "0x1p+0", Status::OK, Category::Normal),
- (m_smallest_value, p_smallest_normalized, "-0x1p-23", Status::OK, Category::Normal),
- (m_smallest_value, m_smallest_normalized, "0x1p-23", Status::OK, Category::Normal),
- (p_smallest_normalized, p_inf, "0x0p+0", Status::OK, Category::Zero),
- (p_smallest_normalized, m_inf, "-0x0p+0", Status::OK, Category::Zero),
- (p_smallest_normalized, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
- (p_smallest_normalized, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
- (p_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (p_smallest_normalized, p_normal_value, "0x1p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, m_normal_value, "-0x1p-126", Status::OK, Category::Normal),
- (p_smallest_normalized, p_largest_value, "0x0p+0", underflow_status, Category::Zero),
- (p_smallest_normalized, m_largest_value, "-0x0p+0", underflow_status, Category::Zero),
- (p_smallest_normalized, p_smallest_value, "0x1p+23", Status::OK, Category::Normal),
- (p_smallest_normalized, m_smallest_value, "-0x1p+23", Status::OK, Category::Normal),
- (p_smallest_normalized, p_smallest_normalized, "0x1p+0", Status::OK, Category::Normal),
- (p_smallest_normalized, m_smallest_normalized, "-0x1p+0", Status::OK, Category::Normal),
- (m_smallest_normalized, p_inf, "-0x0p+0", Status::OK, Category::Zero),
- (m_smallest_normalized, m_inf, "0x0p+0", Status::OK, Category::Zero),
- (m_smallest_normalized, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
- (m_smallest_normalized, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
- (m_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
- /*
- // See Note 1.
- (m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
- */
- (m_smallest_normalized, p_normal_value, "-0x1p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, m_normal_value, "0x1p-126", Status::OK, Category::Normal),
- (m_smallest_normalized, p_largest_value, "-0x0p+0", underflow_status, Category::Zero),
- (m_smallest_normalized, m_largest_value, "0x0p+0", underflow_status, Category::Zero),
- (m_smallest_normalized, p_smallest_value, "-0x1p+23", Status::OK, Category::Normal),
- (m_smallest_normalized, m_smallest_value, "0x1p+23", Status::OK, Category::Normal),
- (m_smallest_normalized, p_smallest_normalized, "-0x1p+0", Status::OK, Category::Normal),
- (m_smallest_normalized, m_smallest_normalized, "0x1p+0", Status::OK, Category::Normal),
- ];
-
- for (x, y, e_result, e_status, e_category) in special_cases {
- let status;
- let result = unpack!(status=, x / y);
- assert_eq!(status, e_status);
- assert_eq!(result.category(), e_category);
- assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
- }
-}
-
-#[test]
-fn operator_overloads() {
- // This is mostly testing that these operator overloads compile.
- let one = "0x1p+0".parse::<Single>().unwrap();
- let two = "0x2p+0".parse::<Single>().unwrap();
- assert!(two.bitwise_eq((one + one).value));
- assert!(one.bitwise_eq((two - one).value));
- assert!(two.bitwise_eq((one * two).value));
- assert!(one.bitwise_eq((two / two).value));
-}
-
-#[test]
-fn abs() {
- let p_inf = Single::INFINITY;
- let m_inf = -Single::INFINITY;
- let p_zero = Single::ZERO;
- let m_zero = -Single::ZERO;
- let p_qnan = Single::NAN;
- let m_qnan = -Single::NAN;
- let p_snan = Single::snan(None);
- let m_snan = -Single::snan(None);
- let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
- let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
- let p_largest_value = Single::largest();
- let m_largest_value = -Single::largest();
- let p_smallest_value = Single::SMALLEST;
- let m_smallest_value = -Single::SMALLEST;
- let p_smallest_normalized = Single::smallest_normalized();
- let m_smallest_normalized = -Single::smallest_normalized();
-
- assert!(p_inf.bitwise_eq(p_inf.abs()));
- assert!(p_inf.bitwise_eq(m_inf.abs()));
- assert!(p_zero.bitwise_eq(p_zero.abs()));
- assert!(p_zero.bitwise_eq(m_zero.abs()));
- assert!(p_qnan.bitwise_eq(p_qnan.abs()));
- assert!(p_qnan.bitwise_eq(m_qnan.abs()));
- assert!(p_snan.bitwise_eq(p_snan.abs()));
- assert!(p_snan.bitwise_eq(m_snan.abs()));
- assert!(p_normal_value.bitwise_eq(p_normal_value.abs()));
- assert!(p_normal_value.bitwise_eq(m_normal_value.abs()));
- assert!(p_largest_value.bitwise_eq(p_largest_value.abs()));
- assert!(p_largest_value.bitwise_eq(m_largest_value.abs()));
- assert!(p_smallest_value.bitwise_eq(p_smallest_value.abs()));
- assert!(p_smallest_value.bitwise_eq(m_smallest_value.abs()));
- assert!(p_smallest_normalized.bitwise_eq(p_smallest_normalized.abs(),));
- assert!(p_smallest_normalized.bitwise_eq(m_smallest_normalized.abs(),));
-}
-
-#[test]
-fn neg() {
- let one = "1.0".parse::<Single>().unwrap();
- let neg_one = "-1.0".parse::<Single>().unwrap();
- let zero = Single::ZERO;
- let neg_zero = -Single::ZERO;
- let inf = Single::INFINITY;
- let neg_inf = -Single::INFINITY;
- let qnan = Single::NAN;
- let neg_qnan = -Single::NAN;
-
- assert!(neg_one.bitwise_eq(-one));
- assert!(one.bitwise_eq(-neg_one));
- assert!(neg_zero.bitwise_eq(-zero));
- assert!(zero.bitwise_eq(-neg_zero));
- assert!(neg_inf.bitwise_eq(-inf));
- assert!(inf.bitwise_eq(-neg_inf));
- assert!(neg_inf.bitwise_eq(-inf));
- assert!(inf.bitwise_eq(-neg_inf));
- assert!(neg_qnan.bitwise_eq(-qnan));
- assert!(qnan.bitwise_eq(-neg_qnan));
-}
-
-#[test]
-fn ilogb() {
- assert_eq!(-1074, Double::SMALLEST.ilogb());
- assert_eq!(-1074, (-Double::SMALLEST).ilogb());
- assert_eq!(-1023, "0x1.ffffffffffffep-1024".parse::<Double>().unwrap().ilogb());
- assert_eq!(-1023, "0x1.ffffffffffffep-1023".parse::<Double>().unwrap().ilogb());
- assert_eq!(-1023, "-0x1.ffffffffffffep-1023".parse::<Double>().unwrap().ilogb());
- assert_eq!(-51, "0x1p-51".parse::<Double>().unwrap().ilogb());
- assert_eq!(-1023, "0x1.c60f120d9f87cp-1023".parse::<Double>().unwrap().ilogb());
- assert_eq!(-2, "0x0.ffffp-1".parse::<Double>().unwrap().ilogb());
- assert_eq!(-1023, "0x1.fffep-1023".parse::<Double>().unwrap().ilogb());
- assert_eq!(1023, Double::largest().ilogb());
- assert_eq!(1023, (-Double::largest()).ilogb());
-
- assert_eq!(0, "0x1p+0".parse::<Single>().unwrap().ilogb());
- assert_eq!(0, "-0x1p+0".parse::<Single>().unwrap().ilogb());
- assert_eq!(42, "0x1p+42".parse::<Single>().unwrap().ilogb());
- assert_eq!(-42, "0x1p-42".parse::<Single>().unwrap().ilogb());
-
- assert_eq!(IEK_INF, Single::INFINITY.ilogb());
- assert_eq!(IEK_INF, (-Single::INFINITY).ilogb());
- assert_eq!(IEK_ZERO, Single::ZERO.ilogb());
- assert_eq!(IEK_ZERO, (-Single::ZERO).ilogb());
- assert_eq!(IEK_NAN, Single::NAN.ilogb());
- assert_eq!(IEK_NAN, Single::snan(None).ilogb());
-
- assert_eq!(127, Single::largest().ilogb());
- assert_eq!(127, (-Single::largest()).ilogb());
-
- assert_eq!(-149, Single::SMALLEST.ilogb());
- assert_eq!(-149, (-Single::SMALLEST).ilogb());
- assert_eq!(-126, Single::smallest_normalized().ilogb());
- assert_eq!(-126, (-Single::smallest_normalized()).ilogb());
-}
-
-#[test]
-fn scalbn() {
- assert!(
- "0x1p+0"
- .parse::<Single>()
- .unwrap()
- .bitwise_eq("0x1p+0".parse::<Single>().unwrap().scalbn(0),)
- );
- assert!(
- "0x1p+42"
- .parse::<Single>()
- .unwrap()
- .bitwise_eq("0x1p+0".parse::<Single>().unwrap().scalbn(42),)
- );
- assert!(
- "0x1p-42"
- .parse::<Single>()
- .unwrap()
- .bitwise_eq("0x1p+0".parse::<Single>().unwrap().scalbn(-42),)
- );
-
- let p_inf = Single::INFINITY;
- let m_inf = -Single::INFINITY;
- let p_zero = Single::ZERO;
- let m_zero = -Single::ZERO;
- let p_qnan = Single::NAN;
- let m_qnan = -Single::NAN;
- let snan = Single::snan(None);
-
- assert!(p_inf.bitwise_eq(p_inf.scalbn(0)));
- assert!(m_inf.bitwise_eq(m_inf.scalbn(0)));
- assert!(p_zero.bitwise_eq(p_zero.scalbn(0)));
- assert!(m_zero.bitwise_eq(m_zero.scalbn(0)));
- assert!(p_qnan.bitwise_eq(p_qnan.scalbn(0)));
- assert!(m_qnan.bitwise_eq(m_qnan.scalbn(0)));
- assert!(!snan.scalbn(0).is_signaling());
-
- let scalbn_snan = snan.scalbn(1);
- assert!(scalbn_snan.is_nan() && !scalbn_snan.is_signaling());
-
- // Make sure highest bit of payload is preserved.
- let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1;
-
- let snan_with_payload = Double::snan(Some(payload));
- let quiet_payload = snan_with_payload.scalbn(1);
- assert!(quiet_payload.is_nan() && !quiet_payload.is_signaling());
- assert_eq!(payload, quiet_payload.to_bits() & ((1 << 51) - 1));
-
- assert!(p_inf.bitwise_eq("0x1p+0".parse::<Single>().unwrap().scalbn(128),));
- assert!(m_inf.bitwise_eq("-0x1p+0".parse::<Single>().unwrap().scalbn(128),));
- assert!(p_inf.bitwise_eq("0x1p+127".parse::<Single>().unwrap().scalbn(1),));
- assert!(p_zero.bitwise_eq("0x1p-127".parse::<Single>().unwrap().scalbn(-127),));
- assert!(m_zero.bitwise_eq("-0x1p-127".parse::<Single>().unwrap().scalbn(-127),));
- assert!(
- "-0x1p-149"
- .parse::<Single>()
- .unwrap()
- .bitwise_eq("-0x1p-127".parse::<Single>().unwrap().scalbn(-22),)
- );
- assert!(p_zero.bitwise_eq("0x1p-126".parse::<Single>().unwrap().scalbn(-24),));
-
- let smallest_f64 = Double::SMALLEST;
- let neg_smallest_f64 = -Double::SMALLEST;
-
- let largest_f64 = Double::largest();
- let neg_largest_f64 = -Double::largest();
-
- let largest_denormal_f64 = "0x1.ffffffffffffep-1023".parse::<Double>().unwrap();
- let neg_largest_denormal_f64 = "-0x1.ffffffffffffep-1023".parse::<Double>().unwrap();
-
- assert!(smallest_f64.bitwise_eq("0x1p-1074".parse::<Double>().unwrap().scalbn(0),));
- assert!(neg_smallest_f64.bitwise_eq("-0x1p-1074".parse::<Double>().unwrap().scalbn(0),));
-
- assert!("0x1p+1023".parse::<Double>().unwrap().bitwise_eq(smallest_f64.scalbn(2097,),));
-
- assert!(smallest_f64.scalbn(-2097).is_pos_zero());
- assert!(smallest_f64.scalbn(-2098).is_pos_zero());
- assert!(smallest_f64.scalbn(-2099).is_pos_zero());
- assert!("0x1p+1022".parse::<Double>().unwrap().bitwise_eq(smallest_f64.scalbn(2096,),));
- assert!("0x1p+1023".parse::<Double>().unwrap().bitwise_eq(smallest_f64.scalbn(2097,),));
- assert!(smallest_f64.scalbn(2098).is_infinite());
- assert!(smallest_f64.scalbn(2099).is_infinite());
-
- // Test for integer overflows when adding to exponent.
- assert!(smallest_f64.scalbn(-ExpInt::MAX).is_pos_zero());
- assert!(largest_f64.scalbn(ExpInt::MAX).is_infinite());
-
- assert!(largest_denormal_f64.bitwise_eq(largest_denormal_f64.scalbn(0),));
- assert!(neg_largest_denormal_f64.bitwise_eq(neg_largest_denormal_f64.scalbn(0),));
-
- assert!(
- "0x1.ffffffffffffep-1022"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(largest_denormal_f64.scalbn(1))
- );
- assert!(
- "-0x1.ffffffffffffep-1021"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(neg_largest_denormal_f64.scalbn(2))
- );
-
- assert!(
- "0x1.ffffffffffffep+1"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(largest_denormal_f64.scalbn(1024))
- );
- assert!(largest_denormal_f64.scalbn(-1023).is_pos_zero());
- assert!(largest_denormal_f64.scalbn(-1024).is_pos_zero());
- assert!(largest_denormal_f64.scalbn(-2048).is_pos_zero());
- assert!(largest_denormal_f64.scalbn(2047).is_infinite());
- assert!(largest_denormal_f64.scalbn(2098).is_infinite());
- assert!(largest_denormal_f64.scalbn(2099).is_infinite());
-
- assert!(
- "0x1.ffffffffffffep-2"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(largest_denormal_f64.scalbn(1021))
- );
- assert!(
- "0x1.ffffffffffffep-1"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(largest_denormal_f64.scalbn(1022))
- );
- assert!(
- "0x1.ffffffffffffep+0"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(largest_denormal_f64.scalbn(1023))
- );
- assert!(
- "0x1.ffffffffffffep+1023"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(largest_denormal_f64.scalbn(2046))
- );
- assert!("0x1p+974".parse::<Double>().unwrap().bitwise_eq(smallest_f64.scalbn(2048,),));
-
- let random_denormal_f64 = "0x1.c60f120d9f87cp+51".parse::<Double>().unwrap();
- assert!(
- "0x1.c60f120d9f87cp-972"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(random_denormal_f64.scalbn(-1023))
- );
- assert!(
- "0x1.c60f120d9f87cp-1"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(random_denormal_f64.scalbn(-52))
- );
- assert!(
- "0x1.c60f120d9f87cp-2"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(random_denormal_f64.scalbn(-53))
- );
- assert!(
- "0x1.c60f120d9f87cp+0"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq(random_denormal_f64.scalbn(-51))
- );
-
- assert!(random_denormal_f64.scalbn(-2097).is_pos_zero());
- assert!(random_denormal_f64.scalbn(-2090).is_pos_zero());
-
- assert!("-0x1p-1073".parse::<Double>().unwrap().bitwise_eq(neg_largest_f64.scalbn(-2097),));
-
- assert!("-0x1p-1024".parse::<Double>().unwrap().bitwise_eq(neg_largest_f64.scalbn(-2048),));
-
- assert!("0x1p-1073".parse::<Double>().unwrap().bitwise_eq(largest_f64.scalbn(-2097,),));
-
- assert!("0x1p-1074".parse::<Double>().unwrap().bitwise_eq(largest_f64.scalbn(-2098,),));
- assert!("-0x1p-1074".parse::<Double>().unwrap().bitwise_eq(neg_largest_f64.scalbn(-2098),));
- assert!(neg_largest_f64.scalbn(-2099).is_neg_zero());
- assert!(largest_f64.scalbn(1).is_infinite());
-
- assert!(
- "0x1p+0"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq("0x1p+52".parse::<Double>().unwrap().scalbn(-52),)
- );
-
- assert!(
- "0x1p-103"
- .parse::<Double>()
- .unwrap()
- .bitwise_eq("0x1p-51".parse::<Double>().unwrap().scalbn(-52),)
- );
-}
-
-#[test]
-fn frexp() {
- let p_zero = Double::ZERO;
- let m_zero = -Double::ZERO;
- let one = Double::from_f64(1.0);
- let m_one = Double::from_f64(-1.0);
-
- let largest_denormal = "0x1.ffffffffffffep-1023".parse::<Double>().unwrap();
- let neg_largest_denormal = "-0x1.ffffffffffffep-1023".parse::<Double>().unwrap();
-
- let smallest = Double::SMALLEST;
- let neg_smallest = -Double::SMALLEST;
-
- let largest = Double::largest();
- let neg_largest = -Double::largest();
-
- let p_inf = Double::INFINITY;
- let m_inf = -Double::INFINITY;
-
- let p_qnan = Double::NAN;
- let m_qnan = -Double::NAN;
- let snan = Double::snan(None);
-
- // Make sure highest bit of payload is preserved.
- let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1;
-
- let snan_with_payload = Double::snan(Some(payload));
-
- let mut exp = 0;
-
- let frac = p_zero.frexp(&mut exp);
- assert_eq!(0, exp);
- assert!(frac.is_pos_zero());
-
- let frac = m_zero.frexp(&mut exp);
- assert_eq!(0, exp);
- assert!(frac.is_neg_zero());
-
- let frac = one.frexp(&mut exp);
- assert_eq!(1, exp);
- assert!("0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = m_one.frexp(&mut exp);
- assert_eq!(1, exp);
- assert!("-0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = largest_denormal.frexp(&mut exp);
- assert_eq!(-1022, exp);
- assert!("0x1.ffffffffffffep-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = neg_largest_denormal.frexp(&mut exp);
- assert_eq!(-1022, exp);
- assert!("-0x1.ffffffffffffep-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = smallest.frexp(&mut exp);
- assert_eq!(-1073, exp);
- assert!("0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = neg_smallest.frexp(&mut exp);
- assert_eq!(-1073, exp);
- assert!("-0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = largest.frexp(&mut exp);
- assert_eq!(1024, exp);
- assert!("0x1.fffffffffffffp-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = neg_largest.frexp(&mut exp);
- assert_eq!(1024, exp);
- assert!("-0x1.fffffffffffffp-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = p_inf.frexp(&mut exp);
- assert_eq!(IEK_INF, exp);
- assert!(frac.is_infinite() && !frac.is_negative());
-
- let frac = m_inf.frexp(&mut exp);
- assert_eq!(IEK_INF, exp);
- assert!(frac.is_infinite() && frac.is_negative());
-
- let frac = p_qnan.frexp(&mut exp);
- assert_eq!(IEK_NAN, exp);
- assert!(frac.is_nan());
-
- let frac = m_qnan.frexp(&mut exp);
- assert_eq!(IEK_NAN, exp);
- assert!(frac.is_nan());
-
- let frac = snan.frexp(&mut exp);
- assert_eq!(IEK_NAN, exp);
- assert!(frac.is_nan() && !frac.is_signaling());
-
- let frac = snan_with_payload.frexp(&mut exp);
- assert_eq!(IEK_NAN, exp);
- assert!(frac.is_nan() && !frac.is_signaling());
- assert_eq!(payload, frac.to_bits() & ((1 << 51) - 1));
-
- let frac = "0x0.ffffp-1".parse::<Double>().unwrap().frexp(&mut exp);
- assert_eq!(-1, exp);
- assert!("0x1.fffep-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = "0x1p-51".parse::<Double>().unwrap().frexp(&mut exp);
- assert_eq!(-50, exp);
- assert!("0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
-
- let frac = "0x1.c60f120d9f87cp+51".parse::<Double>().unwrap().frexp(&mut exp);
- assert_eq!(52, exp);
- assert!("0x1.c60f120d9f87cp-1".parse::<Double>().unwrap().bitwise_eq(frac));
-}
-
-#[test]
-fn modulo() {
- let mut status;
- {
- let f1 = "1.5".parse::<Double>().unwrap();
- let f2 = "1.0".parse::<Double>().unwrap();
- let expected = "0.5".parse::<Double>().unwrap();
- assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
- assert_eq!(status, Status::OK);
- }
- {
- let f1 = "0.5".parse::<Double>().unwrap();
- let f2 = "1.0".parse::<Double>().unwrap();
- let expected = "0.5".parse::<Double>().unwrap();
- assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
- assert_eq!(status, Status::OK);
- }
- {
- let f1 = "0x1.3333333333333p-2".parse::<Double>().unwrap(); // 0.3
- let f2 = "0x1.47ae147ae147bp-7".parse::<Double>().unwrap(); // 0.01
- // 0.009999999999999983
- let expected = "0x1.47ae147ae1471p-7".parse::<Double>().unwrap();
- assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
- assert_eq!(status, Status::OK);
- }
- {
- let f1 = "0x1p64".parse::<Double>().unwrap(); // 1.8446744073709552e19
- let f2 = "1.5".parse::<Double>().unwrap();
- let expected = "1.0".parse::<Double>().unwrap();
- assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
- assert_eq!(status, Status::OK);
- }
- {
- let f1 = "0x1p1000".parse::<Double>().unwrap();
- let f2 = "0x1p-1000".parse::<Double>().unwrap();
- let expected = "0.0".parse::<Double>().unwrap();
- assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
- assert_eq!(status, Status::OK);
- }
- {
- let f1 = "0.0".parse::<Double>().unwrap();
- let f2 = "1.0".parse::<Double>().unwrap();
- let expected = "0.0".parse::<Double>().unwrap();
- assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
- assert_eq!(status, Status::OK);
- }
- {
- let f1 = "1.0".parse::<Double>().unwrap();
- let f2 = "0.0".parse::<Double>().unwrap();
- assert!(unpack!(status=, f1 % f2).is_nan());
- assert_eq!(status, Status::INVALID_OP);
- }
- {
- let f1 = "0.0".parse::<Double>().unwrap();
- let f2 = "0.0".parse::<Double>().unwrap();
- assert!(unpack!(status=, f1 % f2).is_nan());
- assert_eq!(status, Status::INVALID_OP);
- }
- {
- let f1 = Double::INFINITY;
- let f2 = "1.0".parse::<Double>().unwrap();
- assert!(unpack!(status=, f1 % f2).is_nan());
- assert_eq!(status, Status::INVALID_OP);
- }
-}
diff --git a/compiler/rustc_apfloat/tests/ppc.rs b/compiler/rustc_apfloat/tests/ppc.rs
deleted file mode 100644
index c769d2654..000000000
--- a/compiler/rustc_apfloat/tests/ppc.rs
+++ /dev/null
@@ -1,530 +0,0 @@
-use rustc_apfloat::ppc::DoubleDouble;
-use rustc_apfloat::{Category, Float, Round};
-
-use std::cmp::Ordering;
-
-#[test]
-fn ppc_double_double() {
- let test = DoubleDouble::ZERO;
- let expected = "0x0p+0".parse::<DoubleDouble>().unwrap();
- assert!(test.is_zero());
- assert!(!test.is_negative());
- assert!(test.bitwise_eq(expected));
- assert_eq!(0, test.to_bits());
-
- let test = -DoubleDouble::ZERO;
- let expected = "-0x0p+0".parse::<DoubleDouble>().unwrap();
- assert!(test.is_zero());
- assert!(test.is_negative());
- assert!(test.bitwise_eq(expected));
- assert_eq!(0x8000000000000000, test.to_bits());
-
- let test = "1.0".parse::<DoubleDouble>().unwrap();
- assert_eq!(0x3ff0000000000000, test.to_bits());
-
- // LDBL_MAX
- let test = "1.79769313486231580793728971405301e+308".parse::<DoubleDouble>().unwrap();
- assert_eq!(0x7c8ffffffffffffe_7fefffffffffffff, test.to_bits());
-
- // LDBL_MIN
- let test = "2.00416836000897277799610805135016e-292".parse::<DoubleDouble>().unwrap();
- assert_eq!(0x0000000000000000_0360000000000000, test.to_bits());
-}
-
-#[test]
-fn ppc_double_double_add_special() {
- let data = [
- // (1 + 0) + (-1 + 0) = Category::Zero
- (0x3ff0000000000000, 0xbff0000000000000, Category::Zero, Round::NearestTiesToEven),
- // LDBL_MAX + (1.1 >> (1023 - 106) + 0) = Category::Infinity
- (
- 0x7c8ffffffffffffe_7fefffffffffffff,
- 0x7948000000000000,
- Category::Infinity,
- Round::NearestTiesToEven,
- ),
- // FIXME: change the 4th 0x75effffffffffffe to 0x75efffffffffffff when
- // DoubleDouble's fallback is gone.
- // LDBL_MAX + (1.011111... >> (1023 - 106) + (1.1111111...0 >> (1023 -
- // 160))) = Category::Normal
- (
- 0x7c8ffffffffffffe_7fefffffffffffff,
- 0x75effffffffffffe_7947ffffffffffff,
- Category::Normal,
- Round::NearestTiesToEven,
- ),
- // LDBL_MAX + LDBL_MAX = Category::Infinity
- (
- 0x7c8ffffffffffffe_7fefffffffffffff,
- 0x7c8ffffffffffffe_7fefffffffffffff,
- Category::Infinity,
- Round::NearestTiesToEven,
- ),
- // NaN + (1 + 0) = Category::NaN
- (0x7ff8000000000000, 0x3ff0000000000000, Category::NaN, Round::NearestTiesToEven),
- ];
-
- for (op1, op2, expected, round) in data {
- {
- let mut a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- a1 = a1.add_r(a2, round).value;
-
- assert_eq!(expected, a1.category(), "{:#x} + {:#x}", op1, op2);
- }
- {
- let a1 = DoubleDouble::from_bits(op1);
- let mut a2 = DoubleDouble::from_bits(op2);
- a2 = a2.add_r(a1, round).value;
-
- assert_eq!(expected, a2.category(), "{:#x} + {:#x}", op2, op1);
- }
- }
-}
-
-#[test]
-fn ppc_double_double_add() {
- let data = [
- // (1 + 0) + (1e-105 + 0) = (1 + 1e-105)
- (
- 0x3ff0000000000000,
- 0x3960000000000000,
- 0x3960000000000000_3ff0000000000000,
- Round::NearestTiesToEven,
- ),
- // (1 + 0) + (1e-106 + 0) = (1 + 1e-106)
- (
- 0x3ff0000000000000,
- 0x3950000000000000,
- 0x3950000000000000_3ff0000000000000,
- Round::NearestTiesToEven,
- ),
- // (1 + 1e-106) + (1e-106 + 0) = (1 + 1e-105)
- (
- 0x3950000000000000_3ff0000000000000,
- 0x3950000000000000,
- 0x3960000000000000_3ff0000000000000,
- Round::NearestTiesToEven,
- ),
- // (1 + 0) + (epsilon + 0) = (1 + epsilon)
- (
- 0x3ff0000000000000,
- 0x0000000000000001,
- 0x0000000000000001_3ff0000000000000,
- Round::NearestTiesToEven,
- ),
- // FIXME: change 0xf950000000000000 to 0xf940000000000000, when
- // DoubleDouble's fallback is gone.
- // (DBL_MAX - 1 << (1023 - 105)) + (1 << (1023 - 53) + 0) = DBL_MAX +
- // 1.11111... << (1023 - 52)
- (
- 0xf950000000000000_7fefffffffffffff,
- 0x7c90000000000000,
- 0x7c8ffffffffffffe_7fefffffffffffff,
- Round::NearestTiesToEven,
- ),
- // FIXME: change 0xf950000000000000 to 0xf940000000000000, when
- // DoubleDouble's fallback is gone.
- // (1 << (1023 - 53) + 0) + (DBL_MAX - 1 << (1023 - 105)) = DBL_MAX +
- // 1.11111... << (1023 - 52)
- (
- 0x7c90000000000000,
- 0xf950000000000000_7fefffffffffffff,
- 0x7c8ffffffffffffe_7fefffffffffffff,
- Round::NearestTiesToEven,
- ),
- ];
-
- for (op1, op2, expected, round) in data {
- {
- let mut a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- a1 = a1.add_r(a2, round).value;
-
- assert_eq!(expected, a1.to_bits(), "{:#x} + {:#x}", op1, op2);
- }
- {
- let a1 = DoubleDouble::from_bits(op1);
- let mut a2 = DoubleDouble::from_bits(op2);
- a2 = a2.add_r(a1, round).value;
-
- assert_eq!(expected, a2.to_bits(), "{:#x} + {:#x}", op2, op1);
- }
- }
-}
-
-#[test]
-fn ppc_double_double_subtract() {
- let data = [
- // (1 + 0) - (-1e-105 + 0) = (1 + 1e-105)
- (
- 0x3ff0000000000000,
- 0xb960000000000000,
- 0x3960000000000000_3ff0000000000000,
- Round::NearestTiesToEven,
- ),
- // (1 + 0) - (-1e-106 + 0) = (1 + 1e-106)
- (
- 0x3ff0000000000000,
- 0xb950000000000000,
- 0x3950000000000000_3ff0000000000000,
- Round::NearestTiesToEven,
- ),
- ];
-
- for (op1, op2, expected, round) in data {
- let mut a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- a1 = a1.sub_r(a2, round).value;
-
- assert_eq!(expected, a1.to_bits(), "{:#x} - {:#x}", op1, op2);
- }
-}
-
-#[test]
-fn ppc_double_double_multiply_special() {
- let data = [
- // Category::NaN * Category::NaN = Category::NaN
- (0x7ff8000000000000, 0x7ff8000000000000, Category::NaN, Round::NearestTiesToEven),
- // Category::NaN * Category::Zero = Category::NaN
- (0x7ff8000000000000, 0, Category::NaN, Round::NearestTiesToEven),
- // Category::NaN * Category::Infinity = Category::NaN
- (0x7ff8000000000000, 0x7ff0000000000000, Category::NaN, Round::NearestTiesToEven),
- // Category::NaN * Category::Normal = Category::NaN
- (0x7ff8000000000000, 0x3ff0000000000000, Category::NaN, Round::NearestTiesToEven),
- // Category::Infinity * Category::Infinity = Category::Infinity
- (0x7ff0000000000000, 0x7ff0000000000000, Category::Infinity, Round::NearestTiesToEven),
- // Category::Infinity * Category::Zero = Category::NaN
- (0x7ff0000000000000, 0, Category::NaN, Round::NearestTiesToEven),
- // Category::Infinity * Category::Normal = Category::Infinity
- (0x7ff0000000000000, 0x3ff0000000000000, Category::Infinity, Round::NearestTiesToEven),
- // Category::Zero * Category::Zero = Category::Zero
- (0, 0, Category::Zero, Round::NearestTiesToEven),
- // Category::Zero * Category::Normal = Category::Zero
- (0, 0x3ff0000000000000, Category::Zero, Round::NearestTiesToEven),
- ];
-
- for (op1, op2, expected, round) in data {
- {
- let mut a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- a1 = a1.mul_r(a2, round).value;
-
- assert_eq!(expected, a1.category(), "{:#x} * {:#x}", op1, op2);
- }
- {
- let a1 = DoubleDouble::from_bits(op1);
- let mut a2 = DoubleDouble::from_bits(op2);
- a2 = a2.mul_r(a1, round).value;
-
- assert_eq!(expected, a2.category(), "{:#x} * {:#x}", op2, op1);
- }
- }
-}
-
-#[test]
-fn ppc_double_double_multiply() {
- let data = [
- // 1/3 * 3 = 1.0
- (
- 0x3c75555555555556_3fd5555555555555,
- 0x4008000000000000,
- 0x3ff0000000000000,
- Round::NearestTiesToEven,
- ),
- // (1 + epsilon) * (1 + 0) = Category::Zero
- (
- 0x0000000000000001_3ff0000000000000,
- 0x3ff0000000000000,
- 0x0000000000000001_3ff0000000000000,
- Round::NearestTiesToEven,
- ),
- // (1 + epsilon) * (1 + epsilon) = 1 + 2 * epsilon
- (
- 0x0000000000000001_3ff0000000000000,
- 0x0000000000000001_3ff0000000000000,
- 0x0000000000000002_3ff0000000000000,
- Round::NearestTiesToEven,
- ),
- // -(1 + epsilon) * (1 + epsilon) = -1
- (
- 0x0000000000000001_bff0000000000000,
- 0x0000000000000001_3ff0000000000000,
- 0xbff0000000000000,
- Round::NearestTiesToEven,
- ),
- // (0.5 + 0) * (1 + 2 * epsilon) = 0.5 + epsilon
- (
- 0x3fe0000000000000,
- 0x0000000000000002_3ff0000000000000,
- 0x0000000000000001_3fe0000000000000,
- Round::NearestTiesToEven,
- ),
- // (0.5 + 0) * (1 + epsilon) = 0.5
- (
- 0x3fe0000000000000,
- 0x0000000000000001_3ff0000000000000,
- 0x3fe0000000000000,
- Round::NearestTiesToEven,
- ),
- // __LDBL_MAX__ * (1 + 1 << 106) = inf
- (
- 0x7c8ffffffffffffe_7fefffffffffffff,
- 0x3950000000000000_3ff0000000000000,
- 0x7ff0000000000000,
- Round::NearestTiesToEven,
- ),
- // __LDBL_MAX__ * (1 + 1 << 107) is greater than __LDBL_MAX__, but not inf
- (
- 0x7c8ffffffffffffe_7fefffffffffffff,
- 0x3940000000000000_3ff0000000000000,
- 0x7c8fffffffffffff_7fefffffffffffff,
- Round::NearestTiesToEven,
- ),
- // __LDBL_MAX__ * (1 + 1 << 108) = __LDBL_MAX__
- (
- 0x7c8ffffffffffffe_7fefffffffffffff,
- 0x3930000000000000_3ff0000000000000,
- 0x7c8ffffffffffffe_7fefffffffffffff,
- Round::NearestTiesToEven,
- ),
- ];
-
- for (op1, op2, expected, round) in data {
- {
- let mut a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- a1 = a1.mul_r(a2, round).value;
-
- assert_eq!(expected, a1.to_bits(), "{:#x} * {:#x}", op1, op2);
- }
- {
- let a1 = DoubleDouble::from_bits(op1);
- let mut a2 = DoubleDouble::from_bits(op2);
- a2 = a2.mul_r(a1, round).value;
-
- assert_eq!(expected, a2.to_bits(), "{:#x} * {:#x}", op2, op1);
- }
- }
-}
-
-#[test]
-fn ppc_double_double_divide() {
- // FIXME: Only a sanity check for now. Add more edge cases when the
- // double-double algorithm is implemented.
- let data = [
- // 1 / 3 = 1/3
- (
- 0x3ff0000000000000,
- 0x4008000000000000,
- 0x3c75555555555556_3fd5555555555555,
- Round::NearestTiesToEven,
- ),
- ];
-
- for (op1, op2, expected, round) in data {
- let mut a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- a1 = a1.div_r(a2, round).value;
-
- assert_eq!(expected, a1.to_bits(), "{:#x} / {:#x}", op1, op2);
- }
-}
-
-#[test]
-fn ppc_double_double_remainder() {
- let data = [
- // ieee_rem(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53)
- (
- 0x3cb8000000000000_4008000000000000,
- 0x3ca4000000000000_3ff4000000000000,
- 0x3c90000000000000_3fe0000000000000,
- ),
- // ieee_rem(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (-0.5 - 0.5 << 53)
- (
- 0x3cb8000000000000_4008000000000000,
- 0x3cac000000000000_3ffc000000000000,
- 0xbc90000000000000_bfe0000000000000,
- ),
- ];
-
- for (op1, op2, expected) in data {
- let a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- let result = a1.ieee_rem(a2).value;
-
- assert_eq!(expected, result.to_bits(), "ieee_rem({:#x}, {:#x})", op1, op2);
- }
-}
-
-#[test]
-fn ppc_double_double_mod() {
- let data = [
- // mod(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53)
- (
- 0x3cb8000000000000_4008000000000000,
- 0x3ca4000000000000_3ff4000000000000,
- 0x3c90000000000000_3fe0000000000000,
- ),
- // mod(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (1.25 + 1.25 << 53)
- // 0xbc98000000000000 doesn't seem right, but it's what we currently have.
- // FIXME: investigate
- (
- 0x3cb8000000000000_4008000000000000,
- 0x3cac000000000000_3ffc000000000000,
- 0xbc98000000000000_3ff4000000000001,
- ),
- ];
-
- for (op1, op2, expected) in data {
- let a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- let r = (a1 % a2).value;
-
- assert_eq!(expected, r.to_bits(), "fmod({:#x}, {:#x})", op1, op2);
- }
-}
-
-#[test]
-fn ppc_double_double_fma() {
- // Sanity check for now.
- let mut a = "2".parse::<DoubleDouble>().unwrap();
- a = a.mul_add("3".parse::<DoubleDouble>().unwrap(), "4".parse::<DoubleDouble>().unwrap()).value;
- assert_eq!(Some(Ordering::Equal), "10".parse::<DoubleDouble>().unwrap().partial_cmp(&a));
-}
-
-#[test]
-fn ppc_double_double_round_to_integral() {
- {
- let a = "1.5".parse::<DoubleDouble>().unwrap();
- let a = a.round_to_integral(Round::NearestTiesToEven).value;
- assert_eq!(Some(Ordering::Equal), "2".parse::<DoubleDouble>().unwrap().partial_cmp(&a));
- }
- {
- let a = "2.5".parse::<DoubleDouble>().unwrap();
- let a = a.round_to_integral(Round::NearestTiesToEven).value;
- assert_eq!(Some(Ordering::Equal), "2".parse::<DoubleDouble>().unwrap().partial_cmp(&a));
- }
-}
-
-#[test]
-fn ppc_double_double_compare() {
- let data = [
- // (1 + 0) = (1 + 0)
- (0x3ff0000000000000, 0x3ff0000000000000, Some(Ordering::Equal)),
- // (1 + 0) < (1.00...1 + 0)
- (0x3ff0000000000000, 0x3ff0000000000001, Some(Ordering::Less)),
- // (1.00...1 + 0) > (1 + 0)
- (0x3ff0000000000001, 0x3ff0000000000000, Some(Ordering::Greater)),
- // (1 + 0) < (1 + epsilon)
- (0x3ff0000000000000, 0x0000000000000001_3ff0000000000001, Some(Ordering::Less)),
- // NaN != NaN
- (0x7ff8000000000000, 0x7ff8000000000000, None),
- // (1 + 0) != NaN
- (0x3ff0000000000000, 0x7ff8000000000000, None),
- // Inf = Inf
- (0x7ff0000000000000, 0x7ff0000000000000, Some(Ordering::Equal)),
- ];
-
- for (op1, op2, expected) in data {
- let a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- assert_eq!(expected, a1.partial_cmp(&a2), "compare({:#x}, {:#x})", op1, op2,);
- }
-}
-
-#[test]
-fn ppc_double_double_bitwise_eq() {
- let data = [
- // (1 + 0) = (1 + 0)
- (0x3ff0000000000000, 0x3ff0000000000000, true),
- // (1 + 0) != (1.00...1 + 0)
- (0x3ff0000000000000, 0x3ff0000000000001, false),
- // NaN = NaN
- (0x7ff8000000000000, 0x7ff8000000000000, true),
- // NaN != NaN with a different bit pattern
- (0x7ff8000000000000, 0x3ff0000000000000_7ff8000000000000, false),
- // Inf = Inf
- (0x7ff0000000000000, 0x7ff0000000000000, true),
- ];
-
- for (op1, op2, expected) in data {
- let a1 = DoubleDouble::from_bits(op1);
- let a2 = DoubleDouble::from_bits(op2);
- assert_eq!(expected, a1.bitwise_eq(a2), "{:#x} = {:#x}", op1, op2);
- }
-}
-
-#[test]
-fn ppc_double_double_change_sign() {
- let float = DoubleDouble::from_bits(0xbcb0000000000000_400f000000000000);
- {
- let actual = float.copy_sign("1".parse::<DoubleDouble>().unwrap());
- assert_eq!(0xbcb0000000000000_400f000000000000, actual.to_bits());
- }
- {
- let actual = float.copy_sign("-1".parse::<DoubleDouble>().unwrap());
- assert_eq!(0x3cb0000000000000_c00f000000000000, actual.to_bits());
- }
-}
-
-#[test]
-fn ppc_double_double_factories() {
- assert_eq!(0, DoubleDouble::ZERO.to_bits());
- assert_eq!(0x7c8ffffffffffffe_7fefffffffffffff, DoubleDouble::largest().to_bits());
- assert_eq!(0x0000000000000001, DoubleDouble::SMALLEST.to_bits());
- assert_eq!(0x0360000000000000, DoubleDouble::smallest_normalized().to_bits());
- assert_eq!(0x0000000000000000_8000000000000000, (-DoubleDouble::ZERO).to_bits());
- assert_eq!(0xfc8ffffffffffffe_ffefffffffffffff, (-DoubleDouble::largest()).to_bits());
- assert_eq!(0x0000000000000000_8000000000000001, (-DoubleDouble::SMALLEST).to_bits());
- assert_eq!(
- 0x0000000000000000_8360000000000000,
- (-DoubleDouble::smallest_normalized()).to_bits()
- );
- assert!(DoubleDouble::SMALLEST.is_smallest());
- assert!(DoubleDouble::largest().is_largest());
-}
-
-#[test]
-fn ppc_double_double_is_denormal() {
- assert!(DoubleDouble::SMALLEST.is_denormal());
- assert!(!DoubleDouble::largest().is_denormal());
- assert!(!DoubleDouble::smallest_normalized().is_denormal());
- {
- // (4 + 3) is not normalized
- let data = 0x4008000000000000_4010000000000000;
- assert!(DoubleDouble::from_bits(data).is_denormal());
- }
-}
-
-#[test]
-fn ppc_double_double_exact_inverse() {
- assert!(
- "2.0"
- .parse::<DoubleDouble>()
- .unwrap()
- .get_exact_inverse()
- .unwrap()
- .bitwise_eq("0.5".parse::<DoubleDouble>().unwrap())
- );
-}
-
-#[test]
-fn ppc_double_double_scalbn() {
- // 3.0 + 3.0 << 53
- let input = 0x3cb8000000000000_4008000000000000;
- let result = DoubleDouble::from_bits(input).scalbn(1);
- // 6.0 + 6.0 << 53
- assert_eq!(0x3cc8000000000000_4018000000000000, result.to_bits());
-}
-
-#[test]
-fn ppc_double_double_frexp() {
- // 3.0 + 3.0 << 53
- let input = 0x3cb8000000000000_4008000000000000;
- let mut exp = 0;
- // 0.75 + 0.75 << 53
- let result = DoubleDouble::from_bits(input).frexp(&mut exp);
- assert_eq!(2, exp);
- assert_eq!(0x3c98000000000000_3fe8000000000000, result.to_bits());
-}
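The removed tests above pin down `scalbn` and `frexp` behaviour for ppc double-double values (3.0 scaled by 2^1 gives 6.0; 3.0 decomposes as 0.75 * 2^2). A minimal standalone sketch of the same identities on plain `f64`; the helper names are made up here and only normal, finite inputs are handled:

fn scalbn(x: f64, n: i32) -> f64 {
    // x * 2^n, the identity checked by `ppc_double_double_scalbn` above.
    x * 2f64.powi(n)
}

fn frexp(x: f64) -> (f64, i32) {
    // Decompose a normal, nonzero f64 into m * 2^e with m in [0.5, 1), as
    // `ppc_double_double_frexp` expects. Zero, NaN, infinities and subnormals are ignored.
    let bits = x.to_bits();
    let exp = ((bits >> 52) & 0x7ff) as i32 - 1022;
    let mantissa = f64::from_bits((bits & !(0x7ffu64 << 52)) | (1022u64 << 52));
    (mantissa, exp)
}

fn main() {
    assert_eq!(scalbn(3.0, 1), 6.0); // mirrors the scalbn test: 3.0 scaled by 2^1 is 6.0
    assert_eq!(frexp(3.0), (0.75, 2)); // mirrors the frexp test: 3.0 = 0.75 * 2^2
}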
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index ba47ebd68..e45b7c154 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -11,6 +11,7 @@
html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
test(no_crate_inject, attr(deny(warnings)))
)]
+#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(new_uninit)]
#![feature(maybe_uninit_slice)]
@@ -23,17 +24,18 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#![allow(clippy::mut_from_ref)] // Arena allocators are one of the places where this pattern is fine.
use smallvec::SmallVec;
use std::alloc::Layout;
use std::cell::{Cell, RefCell};
-use std::cmp;
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::slice;
+use std::{cmp, intrinsics};
#[inline(never)]
#[cold]
@@ -362,6 +364,22 @@ unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
unsafe impl<T: Send> Send for TypedArena<T> {}
+#[inline(always)]
+fn align_down(val: usize, align: usize) -> usize {
+ debug_assert!(align.is_power_of_two());
+ val & !(align - 1)
+}
+
+#[inline(always)]
+fn align_up(val: usize, align: usize) -> usize {
+ debug_assert!(align.is_power_of_two());
+ (val + align - 1) & !(align - 1)
+}
+
+// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
+// to optimize away alignment code.
+const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();
+
/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
@@ -374,6 +392,8 @@ pub struct DroplessArena {
/// start. (This is slightly simpler and faster than allocating upwards,
/// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
/// When this pointer crosses the start pointer, a new chunk is allocated.
+ ///
+ /// This is kept aligned to DROPLESS_ALIGNMENT.
end: Cell<*mut u8>,
/// A vector of arena chunks.
@@ -394,9 +414,11 @@ impl Default for DroplessArena {
}
impl DroplessArena {
- #[inline(never)]
- #[cold]
- fn grow(&self, additional: usize) {
+ fn grow(&self, layout: Layout) {
+ // Add some padding so we can align `self.end` while
+ // still fitting in a `layout` allocation.
+ let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;
+
unsafe {
let mut chunks = self.chunks.borrow_mut();
let mut new_cap;
@@ -415,13 +437,35 @@ impl DroplessArena {
// Also ensure that this chunk can fit `additional`.
new_cap = cmp::max(additional, new_cap);
- let mut chunk = ArenaChunk::new(new_cap);
+ let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
self.start.set(chunk.start());
- self.end.set(chunk.end());
+
+ // Align the end to DROPLESS_ALIGNMENT
+ let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);
+
+ // Make sure we don't go past `start`. This should not happen since the allocation
+ // should be at least DROPLESS_ALIGNMENT - 1 bytes.
+ debug_assert!(chunk.start().addr() <= end);
+
+ self.end.set(chunk.end().with_addr(end));
+
chunks.push(chunk);
}
}
+ #[inline(never)]
+ #[cold]
+ fn grow_and_alloc_raw(&self, layout: Layout) -> *mut u8 {
+ self.grow(layout);
+ self.alloc_raw_without_grow(layout).unwrap()
+ }
+
+ #[inline(never)]
+ #[cold]
+ fn grow_and_alloc<T>(&self) -> *mut u8 {
+ self.grow_and_alloc_raw(Layout::new::<T>())
+ }
+
/// Allocates a byte slice with specified layout from the current memory
/// chunk. Returns `None` if there is no free space left to satisfy the
/// request.
@@ -431,12 +475,17 @@ impl DroplessArena {
let old_end = self.end.get();
let end = old_end.addr();
- let align = layout.align();
- let bytes = layout.size();
+ // Align allocated bytes so that `self.end` stays aligned to DROPLESS_ALIGNMENT
+ let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
+
+ // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT
+ unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };
- let new_end = end.checked_sub(bytes)? & !(align - 1);
+ let new_end = align_down(end.checked_sub(bytes)?, layout.align());
if start <= new_end {
let new_end = old_end.with_addr(new_end);
+ // `new_end` is aligned to DROPLESS_ALIGNMENT because `align_down` preserves alignment
+ // and both `end` and `bytes` are already aligned to DROPLESS_ALIGNMENT.
self.end.set(new_end);
Some(new_end)
} else {
@@ -447,21 +496,26 @@ impl DroplessArena {
#[inline]
pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
assert!(layout.size() != 0);
- loop {
- if let Some(a) = self.alloc_raw_without_grow(layout) {
- break a;
- }
- // No free space left. Allocate a new chunk to satisfy the request.
- // On failure the grow will panic or abort.
- self.grow(layout.size());
+ if let Some(a) = self.alloc_raw_without_grow(layout) {
+ return a;
}
+ // No free space left. Allocate a new chunk to satisfy the request.
+ // On failure the grow will panic or abort.
+ self.grow_and_alloc_raw(layout)
}
#[inline]
pub fn alloc<T>(&self, object: T) -> &mut T {
assert!(!mem::needs_drop::<T>());
+ assert!(mem::size_of::<T>() != 0);
- let mem = self.alloc_raw(Layout::for_value::<T>(&object)) as *mut T;
+ let mem = if let Some(a) = self.alloc_raw_without_grow(Layout::for_value::<T>(&object)) {
+ a
+ } else {
+ // No free space left. Allocate a new chunk to satisfy the request.
+ // On failure the grow will panic or abort.
+ self.grow_and_alloc::<T>()
+ } as *mut T;
unsafe {
// Write into uninitialized memory.
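A standalone sketch of the power-of-two alignment arithmetic the new `DroplessArena` code relies on: rounding each request up and the bump pointer down by the same alignment keeps `end` a multiple of `DROPLESS_ALIGNMENT`, which is what the `intrinsics::assume` in `alloc_raw_without_grow` lets LLVM rely on. Helper names mirror the diff; `main` is illustrative only.

fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1) // clear the low bits: round down to a multiple of `align`
}

fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1) // bump past the boundary, then round down
}

fn main() {
    const A: usize = std::mem::align_of::<usize>();
    // Start from an aligned `end`, as `grow` guarantees.
    let mut end = align_down(1 << 20, A);
    for size in [1usize, 3, 7, 8, 24] {
        // `alloc_raw_without_grow` rounds each request up to a multiple of A...
        end -= align_up(size, A);
        // ...so the bump pointer stays aligned after every allocation.
        assert_eq!(end, align_down(end, A));
    }
}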
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index a7198fbf8..58725a08c 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -14,7 +14,7 @@
//! - [`Generics`], [`GenericParam`], [`WhereClause`]: Metadata associated with generic parameters.
//! - [`EnumDef`] and [`Variant`]: Enum declaration.
//! - [`MetaItemLit`] and [`LitKind`]: Literal expressions.
-//! - [`MacroDef`], [`MacStmtStyle`], [`MacCall`], [`MacDelimiter`]: Macro definition and invocation.
+//! - [`MacroDef`], [`MacStmtStyle`], [`MacCall`]: Macro definition and invocation.
//! - [`Attribute`]: Metadata associated with item.
//! - [`UnOp`], [`BinOp`], and [`BinOpKind`]: Unary and binary operators.
@@ -313,6 +313,16 @@ pub enum TraitBoundModifier {
MaybeConstMaybe,
}
+impl TraitBoundModifier {
+ pub fn to_constness(self) -> Const {
+ match self {
+ // FIXME(effects) span
+ Self::MaybeConst => Const::Yes(DUMMY_SP),
+ _ => Const::No,
+ }
+ }
+}
+
/// The AST represents all type param bounds as types.
/// `typeck::collect::compute_bounds` matches these against
/// the "special" built-in traits (see `middle::lang_items`) and
@@ -1462,7 +1472,8 @@ pub enum ExprKind {
/// Access of a named (e.g., `obj.foo`) or unnamed (e.g., `obj.0`) struct field.
Field(P<Expr>, Ident),
/// An indexing operation (e.g., `foo[2]`).
- Index(P<Expr>, P<Expr>),
+ /// The span represents the span of the `[2]`, including brackets.
+ Index(P<Expr>, P<Expr>, Span),
/// A range (e.g., `1..2`, `1..`, `..2`, `1..=2`, `..=2`; and `..` in destructuring assignment).
Range(Option<P<Expr>>, Option<P<Expr>>, RangeLimits),
/// An underscore, used in destructuring assignment to ignore a value.
@@ -1693,7 +1704,7 @@ where
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct DelimArgs {
pub dspan: DelimSpan,
- pub delim: MacDelimiter,
+ pub delim: Delimiter, // Note: `Delimiter::Invisible` never occurs
pub tokens: TokenStream,
}
@@ -1701,7 +1712,7 @@ impl DelimArgs {
/// Whether a macro with these arguments needs a semicolon
/// when used as a standalone item or statement.
pub fn need_semicolon(&self) -> bool {
- !matches!(self, DelimArgs { delim: MacDelimiter::Brace, .. })
+ !matches!(self, DelimArgs { delim: Delimiter::Brace, .. })
}
}
@@ -1717,32 +1728,6 @@ where
}
}
-#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
-pub enum MacDelimiter {
- Parenthesis,
- Bracket,
- Brace,
-}
-
-impl MacDelimiter {
- pub fn to_token(self) -> Delimiter {
- match self {
- MacDelimiter::Parenthesis => Delimiter::Parenthesis,
- MacDelimiter::Bracket => Delimiter::Bracket,
- MacDelimiter::Brace => Delimiter::Brace,
- }
- }
-
- pub fn from_token(delim: Delimiter) -> Option<MacDelimiter> {
- match delim {
- Delimiter::Parenthesis => Some(MacDelimiter::Parenthesis),
- Delimiter::Bracket => Some(MacDelimiter::Bracket),
- Delimiter::Brace => Some(MacDelimiter::Brace),
- Delimiter::Invisible => None,
- }
- }
-}
-
/// Represents a macro definition.
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct MacroDef {
@@ -2353,7 +2338,12 @@ impl Param {
/// Builds a `Param` object from `ExplicitSelf`.
pub fn from_self(attrs: AttrVec, eself: ExplicitSelf, eself_ident: Ident) -> Param {
let span = eself.span.to(eself_ident.span);
- let infer_ty = P(Ty { id: DUMMY_NODE_ID, kind: TyKind::ImplicitSelf, span, tokens: None });
+ let infer_ty = P(Ty {
+ id: DUMMY_NODE_ID,
+ kind: TyKind::ImplicitSelf,
+ span: eself_ident.span,
+ tokens: None,
+ });
let (mutbl, ty) = match eself.node {
SelfKind::Explicit(ty, mutbl) => (mutbl, ty),
SelfKind::Value(mutbl) => (mutbl, infer_ty),
@@ -2942,6 +2932,7 @@ pub struct StaticItem {
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct ConstItem {
pub defaultness: Defaultness,
+ pub generics: Generics,
pub ty: P<Ty>,
pub expr: Option<P<Expr>>,
}
@@ -3053,6 +3044,7 @@ impl ItemKind {
match self {
Self::Fn(box Fn { generics, .. })
| Self::TyAlias(box TyAlias { generics, .. })
+ | Self::Const(box ConstItem { generics, .. })
| Self::Enum(_, generics)
| Self::Struct(_, generics)
| Self::Union(_, generics)
diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
index 15fe29580..19a2b3017 100644
--- a/compiler/rustc_ast/src/attr/mod.rs
+++ b/compiler/rustc_ast/src/attr/mod.rs
@@ -2,7 +2,7 @@
use crate::ast::{AttrArgs, AttrArgsEq, AttrId, AttrItem, AttrKind, AttrStyle, AttrVec, Attribute};
use crate::ast::{DelimArgs, Expr, ExprKind, LitKind, MetaItemLit};
-use crate::ast::{MacDelimiter, MetaItem, MetaItemKind, NestedMetaItem, NormalAttr};
+use crate::ast::{MetaItem, MetaItemKind, NestedMetaItem, NormalAttr};
use crate::ast::{Path, PathSegment, DUMMY_NODE_ID};
use crate::ptr::P;
use crate::token::{self, CommentKind, Delimiter, Token};
@@ -196,7 +196,7 @@ impl AttrItem {
fn meta_item_list(&self) -> Option<ThinVec<NestedMetaItem>> {
match &self.args {
- AttrArgs::Delimited(args) if args.delim == MacDelimiter::Parenthesis => {
+ AttrArgs::Delimited(args) if args.delim == Delimiter::Parenthesis => {
MetaItemKind::list_from_tokens(args.tokens.clone())
}
AttrArgs::Delimited(_) | AttrArgs::Eq(..) | AttrArgs::Empty => None,
@@ -285,17 +285,17 @@ impl MetaItem {
self.kind.value_str()
}
- fn from_tokens<I>(tokens: &mut iter::Peekable<I>) -> Option<MetaItem>
+ fn from_tokens<'a, I>(tokens: &mut iter::Peekable<I>) -> Option<MetaItem>
where
- I: Iterator<Item = TokenTree>,
+ I: Iterator<Item = &'a TokenTree>,
{
// FIXME: Share code with `parse_path`.
- let path = match tokens.next().map(TokenTree::uninterpolate) {
- Some(TokenTree::Token(
- Token { kind: kind @ (token::Ident(..) | token::ModSep), span },
+ let path = match tokens.next().map(|tt| TokenTree::uninterpolate(tt)).as_deref() {
+ Some(&TokenTree::Token(
+ Token { kind: ref kind @ (token::Ident(..) | token::ModSep), span },
_,
)) => 'arm: {
- let mut segments = if let token::Ident(name, _) = kind {
+ let mut segments = if let &token::Ident(name, _) = kind {
if let Some(TokenTree::Token(Token { kind: token::ModSep, .. }, _)) =
tokens.peek()
{
@@ -308,8 +308,8 @@ impl MetaItem {
thin_vec![PathSegment::path_root(span)]
};
loop {
- if let Some(TokenTree::Token(Token { kind: token::Ident(name, _), span }, _)) =
- tokens.next().map(TokenTree::uninterpolate)
+ if let Some(&TokenTree::Token(Token { kind: token::Ident(name, _), span }, _)) =
+ tokens.next().map(|tt| TokenTree::uninterpolate(tt)).as_deref()
{
segments.push(PathSegment::from_ident(Ident::new(name, span)));
} else {
@@ -326,7 +326,7 @@ impl MetaItem {
let span = span.with_hi(segments.last().unwrap().ident.span.hi());
Path { span, segments, tokens: None }
}
- Some(TokenTree::Token(Token { kind: token::Interpolated(nt), .. }, _)) => match &*nt {
+ Some(TokenTree::Token(Token { kind: token::Interpolated(nt), .. }, _)) => match &**nt {
token::Nonterminal::NtMeta(item) => return item.meta(item.path.span),
token::Nonterminal::NtPath(path) => (**path).clone(),
_ => return None,
@@ -354,7 +354,7 @@ impl MetaItemKind {
}
fn list_from_tokens(tokens: TokenStream) -> Option<ThinVec<NestedMetaItem>> {
- let mut tokens = tokens.into_trees().peekable();
+ let mut tokens = tokens.trees().peekable();
let mut result = ThinVec::new();
while tokens.peek().is_some() {
let item = NestedMetaItem::from_tokens(&mut tokens)?;
@@ -367,12 +367,12 @@ impl MetaItemKind {
Some(result)
}
- fn name_value_from_tokens(
- tokens: &mut impl Iterator<Item = TokenTree>,
+ fn name_value_from_tokens<'a>(
+ tokens: &mut impl Iterator<Item = &'a TokenTree>,
) -> Option<MetaItemKind> {
match tokens.next() {
Some(TokenTree::Delimited(_, Delimiter::Invisible, inner_tokens)) => {
- MetaItemKind::name_value_from_tokens(&mut inner_tokens.into_trees())
+ MetaItemKind::name_value_from_tokens(&mut inner_tokens.trees())
}
Some(TokenTree::Token(token, _)) => {
MetaItemLit::from_token(&token).map(MetaItemKind::NameValue)
@@ -381,8 +381,8 @@ impl MetaItemKind {
}
}
- fn from_tokens(
- tokens: &mut iter::Peekable<impl Iterator<Item = TokenTree>>,
+ fn from_tokens<'a>(
+ tokens: &mut iter::Peekable<impl Iterator<Item = &'a TokenTree>>,
) -> Option<MetaItemKind> {
match tokens.peek() {
Some(TokenTree::Delimited(_, Delimiter::Parenthesis, inner_tokens)) => {
@@ -402,11 +402,9 @@ impl MetaItemKind {
fn from_attr_args(args: &AttrArgs) -> Option<MetaItemKind> {
match args {
AttrArgs::Empty => Some(MetaItemKind::Word),
- AttrArgs::Delimited(DelimArgs {
- dspan: _,
- delim: MacDelimiter::Parenthesis,
- tokens,
- }) => MetaItemKind::list_from_tokens(tokens.clone()).map(MetaItemKind::List),
+ AttrArgs::Delimited(DelimArgs { dspan: _, delim: Delimiter::Parenthesis, tokens }) => {
+ MetaItemKind::list_from_tokens(tokens.clone()).map(MetaItemKind::List)
+ }
AttrArgs::Delimited(..) => None,
AttrArgs::Eq(_, AttrArgsEq::Ast(expr)) => match expr.kind {
ExprKind::Lit(token_lit) => {
@@ -501,9 +499,9 @@ impl NestedMetaItem {
self.meta_item().is_some()
}
- fn from_tokens<I>(tokens: &mut iter::Peekable<I>) -> Option<NestedMetaItem>
+ fn from_tokens<'a, I>(tokens: &mut iter::Peekable<I>) -> Option<NestedMetaItem>
where
- I: Iterator<Item = TokenTree>,
+ I: Iterator<Item = &'a TokenTree>,
{
match tokens.peek() {
Some(TokenTree::Token(token, _))
@@ -513,9 +511,8 @@ impl NestedMetaItem {
return Some(NestedMetaItem::Lit(lit));
}
Some(TokenTree::Delimited(_, Delimiter::Invisible, inner_tokens)) => {
- let inner_tokens = inner_tokens.clone();
tokens.next();
- return NestedMetaItem::from_tokens(&mut inner_tokens.into_trees().peekable());
+ return NestedMetaItem::from_tokens(&mut inner_tokens.trees().peekable());
}
_ => {}
}
@@ -579,7 +576,7 @@ pub fn mk_attr_nested_word(
let path = Path::from_ident(outer_ident);
let attr_args = AttrArgs::Delimited(DelimArgs {
dspan: DelimSpan::from_single(span),
- delim: MacDelimiter::Parenthesis,
+ delim: Delimiter::Parenthesis,
tokens: inner_tokens,
});
mk_attr(g, style, path, attr_args, span)
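The change above to iterate attribute token trees by reference pairs with `TokenTree::uninterpolate` now returning a `Cow`: the common case borrows the existing tree and only the rare rewrite allocates. A small sketch of that borrow-or-own pattern, using hypothetical names rather than the real token types:

use std::borrow::Cow;

// Hypothetical stand-in for `TokenTree::uninterpolate`: hand the input back untouched
// (borrowed) unless a rewrite is actually required (owned).
fn normalize(s: &str) -> Cow<'_, str> {
    if s.contains('\t') {
        Cow::Owned(s.replace('\t', "    ")) // had to build a new value
    } else {
        Cow::Borrowed(s) // zero-cost: no clone, no allocation
    }
}

fn main() {
    assert!(matches!(normalize("fn main() {}"), Cow::Borrowed(_)));
    assert!(matches!(normalize("fn main()\t{}"), Cow::Owned(_)));
    // Either way, callers can keep using the result as a plain &str.
    assert_eq!(normalize("a\tb").len(), 6);
}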
diff --git a/compiler/rustc_ast/src/expand/allocator.rs b/compiler/rustc_ast/src/expand/allocator.rs
index e87f6e820..f825b10f4 100644
--- a/compiler/rustc_ast/src/expand/allocator.rs
+++ b/compiler/rustc_ast/src/expand/allocator.rs
@@ -33,29 +33,41 @@ pub enum AllocatorTy {
pub struct AllocatorMethod {
pub name: Symbol,
- pub inputs: &'static [AllocatorTy],
+ pub inputs: &'static [AllocatorMethodInput],
pub output: AllocatorTy,
}
+pub struct AllocatorMethodInput {
+ pub name: &'static str,
+ pub ty: AllocatorTy,
+}
+
pub static ALLOCATOR_METHODS: &[AllocatorMethod] = &[
AllocatorMethod {
name: sym::alloc,
- inputs: &[AllocatorTy::Layout],
+ inputs: &[AllocatorMethodInput { name: "layout", ty: AllocatorTy::Layout }],
output: AllocatorTy::ResultPtr,
},
AllocatorMethod {
name: sym::dealloc,
- inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout],
+ inputs: &[
+ AllocatorMethodInput { name: "ptr", ty: AllocatorTy::Ptr },
+ AllocatorMethodInput { name: "layout", ty: AllocatorTy::Layout },
+ ],
output: AllocatorTy::Unit,
},
AllocatorMethod {
name: sym::realloc,
- inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Usize],
+ inputs: &[
+ AllocatorMethodInput { name: "ptr", ty: AllocatorTy::Ptr },
+ AllocatorMethodInput { name: "layout", ty: AllocatorTy::Layout },
+ AllocatorMethodInput { name: "new_size", ty: AllocatorTy::Usize },
+ ],
output: AllocatorTy::ResultPtr,
},
AllocatorMethod {
name: sym::alloc_zeroed,
- inputs: &[AllocatorTy::Layout],
+ inputs: &[AllocatorMethodInput { name: "layout", ty: AllocatorTy::Layout }],
output: AllocatorTy::ResultPtr,
},
];
diff --git a/compiler/rustc_ast/src/format.rs b/compiler/rustc_ast/src/format.rs
index 699946f30..805596ff0 100644
--- a/compiler/rustc_ast/src/format.rs
+++ b/compiler/rustc_ast/src/format.rs
@@ -67,12 +67,6 @@ pub struct FormatArguments {
names: FxHashMap<Symbol, usize>,
}
-// FIXME: Rustdoc has trouble proving Send/Sync for this. See #106930.
-#[cfg(parallel_compiler)]
-unsafe impl Sync for FormatArguments {}
-#[cfg(parallel_compiler)]
-unsafe impl Send for FormatArguments {}
-
impl FormatArguments {
pub fn new() -> Self {
Self {
diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs
index 53a9c9a04..48e9b180b 100644
--- a/compiler/rustc_ast/src/mut_visit.rs
+++ b/compiler/rustc_ast/src/mut_visit.rs
@@ -13,6 +13,7 @@ use crate::tokenstream::*;
use crate::{ast::*, StaticItem};
use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
+use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lrc;
use rustc_span::source_map::Spanned;
use rustc_span::symbol::Ident;
@@ -1149,10 +1150,11 @@ pub fn noop_flat_map_assoc_item<T: MutVisitor>(
}
fn visit_const_item<T: MutVisitor>(
- ConstItem { defaultness, ty, expr }: &mut ConstItem,
+ ConstItem { defaultness, generics, ty, expr }: &mut ConstItem,
visitor: &mut T,
) {
visit_defaultness(defaultness, visitor);
+ visitor.visit_generics(generics);
visitor.visit_ty(ty);
visit_opt(expr, |expr| visitor.visit_expr(expr));
}
@@ -1368,7 +1370,7 @@ pub fn noop_visit_expr<T: MutVisitor>(
ExprKind::If(cond, tr, fl) => {
vis.visit_expr(cond);
vis.visit_block(tr);
- visit_opt(fl, |fl| vis.visit_expr(fl));
+ visit_opt(fl, |fl| ensure_sufficient_stack(|| vis.visit_expr(fl)));
}
ExprKind::While(cond, body, label) => {
vis.visit_expr(cond);
@@ -1399,7 +1401,7 @@ pub fn noop_visit_expr<T: MutVisitor>(
fn_decl,
body,
fn_decl_span,
- fn_arg_span: _,
+ fn_arg_span,
}) => {
vis.visit_closure_binder(binder);
visit_constness(constness, vis);
@@ -1407,6 +1409,7 @@ pub fn noop_visit_expr<T: MutVisitor>(
vis.visit_fn_decl(fn_decl);
vis.visit_expr(body);
vis.visit_span(fn_decl_span);
+ vis.visit_span(fn_arg_span);
}
ExprKind::Block(blk, label) => {
vis.visit_block(blk);
@@ -1419,9 +1422,10 @@ pub fn noop_visit_expr<T: MutVisitor>(
vis.visit_expr(expr);
vis.visit_span(await_kw_span);
}
- ExprKind::Assign(el, er, _) => {
+ ExprKind::Assign(el, er, span) => {
vis.visit_expr(el);
vis.visit_expr(er);
+ vis.visit_span(span);
}
ExprKind::AssignOp(_op, el, er) => {
vis.visit_expr(el);
@@ -1431,9 +1435,10 @@ pub fn noop_visit_expr<T: MutVisitor>(
vis.visit_expr(el);
vis.visit_ident(ident);
}
- ExprKind::Index(el, er) => {
+ ExprKind::Index(el, er, brackets_span) => {
vis.visit_expr(el);
vis.visit_expr(er);
+ vis.visit_span(brackets_span);
}
ExprKind::Range(e1, e2, _lim) => {
visit_opt(e1, |e1| vis.visit_expr(e1));
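The new `ensure_sufficient_stack` wrapper above protects the recursion into the `else` branch of long `if`/`else if` chains from overflowing the stack by growing it on demand. A hedged sketch of the same idea using the `stacker` crate as an assumed external dependency; the 64 KiB red zone, the 1 MiB growth step and the toy recursion below are illustrative values, not rustc's:

// Grow the stack when fewer than 64 KiB remain, adding 1 MiB segments, so a deep
// (but finite) recursion cannot overflow a fixed-size thread stack.
fn depth_of(n: u64) -> u64 {
    stacker::maybe_grow(64 * 1024, 1024 * 1024, || {
        if n == 0 { 0 } else { 1 + depth_of(n - 1) }
    })
}

fn main() {
    // Deep enough to blow a default thread stack without the guard.
    assert_eq!(depth_of(100_000), 100_000);
}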
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs
index 6646fa944..f4ad0efa4 100644
--- a/compiler/rustc_ast/src/token.rs
+++ b/compiler/rustc_ast/src/token.rs
@@ -11,7 +11,7 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_macros::HashStable_Generic;
use rustc_span::symbol::{kw, sym};
-#[cfg_attr(not(bootstrap), allow(hidden_glob_reexports))]
+#[allow(hidden_glob_reexports)]
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::{self, edition::Edition, Span, DUMMY_SP};
use std::borrow::Cow;
@@ -41,8 +41,6 @@ pub enum BinOpToken {
/// Describes how a sequence of token trees is delimited.
/// Cannot use `proc_macro::Delimiter` directly because this
/// structure should implement some additional traits.
-/// The `None` variant is also renamed to `Invisible` to be
-/// less confusing and better convey the semantics.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[derive(Encodable, Decodable, Hash, HashStable_Generic)]
pub enum Delimiter {
@@ -226,7 +224,9 @@ fn ident_can_begin_type(name: Symbol, span: Span, is_raw: bool) -> bool {
.contains(&name)
}
-#[derive(Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+// SAFETY: due to the `Clone` impl below, all fields of all variants other than
+// `Interpolated` must impl `Copy`.
+#[derive(PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum TokenKind {
/* Expression-operator symbols. */
Eq,
@@ -299,6 +299,19 @@ pub enum TokenKind {
Eof,
}
+impl Clone for TokenKind {
+ fn clone(&self) -> Self {
+ // `TokenKind` would impl `Copy` if it weren't for `Interpolated`. So
+ // for all other variants, this implementation of `clone` is just like
+ // a copy. This is faster than the `derive(Clone)` version which has a
+ // separate path for every variant.
+ match self {
+ Interpolated(nt) => Interpolated(nt.clone()),
+ _ => unsafe { std::ptr::read(self) },
+ }
+ }
+}
+
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct Token {
pub kind: TokenKind,
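The hand-written `Clone` for `TokenKind` above works because every variant other than `Interpolated` carries only `Copy` data, so a bitwise `ptr::read` of `self` is a valid duplicate for them. A self-contained sketch of the same pattern on a made-up enum:

use std::rc::Rc;

#[derive(Debug, PartialEq)]
enum Kind {
    Plus,
    Ident(u32),
    Interpolated(Rc<String>),
}

impl Clone for Kind {
    fn clone(&self) -> Self {
        match self {
            // The only variant with non-`Copy` data needs a real clone (bumps the Rc count).
            Kind::Interpolated(nt) => Kind::Interpolated(nt.clone()),
            // SAFETY: every other variant holds only `Copy` fields, so duplicating the
            // bytes cannot create a second owner of anything that must be dropped once.
            _ => unsafe { std::ptr::read(self) },
        }
    }
}

fn main() {
    let nt = Kind::Interpolated(Rc::new("expr".to_string()));
    assert_eq!(nt.clone(), nt);
    assert_eq!(Kind::Ident(7).clone(), Kind::Ident(7));
    assert_eq!(Kind::Plus.clone(), Kind::Plus);
}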
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index ca4a739ab..e9591c7c8 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -13,7 +13,7 @@
//! and a borrowed `TokenStream` is sufficient to build an owned `TokenStream` without taking
//! ownership of the original.
-use crate::ast::StmtKind;
+use crate::ast::{AttrStyle, StmtKind};
use crate::ast_traits::{HasAttrs, HasSpan, HasTokens};
use crate::token::{self, Delimiter, Nonterminal, Token, TokenKind};
use crate::AttrVec;
@@ -22,10 +22,11 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{self, Lrc};
use rustc_macros::HashStable_Generic;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
-use rustc_span::{Span, DUMMY_SP};
+use rustc_span::{sym, Span, Symbol, DUMMY_SP};
use smallvec::{smallvec, SmallVec};
-use std::{fmt, iter, mem};
+use std::borrow::Cow;
+use std::{cmp, fmt, iter, mem};
/// When the main Rust parser encounters a syntax-extension invocation, it
/// parses the arguments to the invocation as a token tree. This is a very
@@ -98,12 +99,13 @@ impl TokenTree {
TokenTree::Token(Token::new(kind, span), Spacing::Joint)
}
- pub fn uninterpolate(self) -> TokenTree {
+ pub fn uninterpolate(&self) -> Cow<'_, TokenTree> {
match self {
- TokenTree::Token(token, spacing) => {
- TokenTree::Token(token.uninterpolate().into_owned(), spacing)
- }
- tt => tt,
+ TokenTree::Token(token, spacing) => match token.uninterpolate() {
+ Cow::Owned(token) => Cow::Owned(TokenTree::Token(token, *spacing)),
+ Cow::Borrowed(_) => Cow::Borrowed(self),
+ },
+ _ => Cow::Borrowed(self),
}
}
}
@@ -564,6 +566,92 @@ impl TokenStream {
pub fn chunks(&self, chunk_size: usize) -> core::slice::Chunks<'_, TokenTree> {
self.0.chunks(chunk_size)
}
+
+ /// Desugar doc comments like `/// foo` in the stream into `#[doc =
+ /// r"foo"]`. Modifies the `TokenStream` via `Lrc::make_mut`, but as little
+ /// as possible.
+ pub fn desugar_doc_comments(&mut self) {
+ if let Some(desugared_stream) = desugar_inner(self.clone()) {
+ *self = desugared_stream;
+ }
+
+ // The return value is `None` if nothing in `stream` changed.
+ fn desugar_inner(mut stream: TokenStream) -> Option<TokenStream> {
+ let mut i = 0;
+ let mut modified = false;
+ while let Some(tt) = stream.0.get(i) {
+ match tt {
+ &TokenTree::Token(
+ Token { kind: token::DocComment(_, attr_style, data), span },
+ _spacing,
+ ) => {
+ let desugared = desugared_tts(attr_style, data, span);
+ let desugared_len = desugared.len();
+ Lrc::make_mut(&mut stream.0).splice(i..i + 1, desugared);
+ modified = true;
+ i += desugared_len;
+ }
+
+ &TokenTree::Token(..) => i += 1,
+
+ &TokenTree::Delimited(sp, delim, ref delim_stream) => {
+ if let Some(desugared_delim_stream) = desugar_inner(delim_stream.clone()) {
+ let new_tt = TokenTree::Delimited(sp, delim, desugared_delim_stream);
+ Lrc::make_mut(&mut stream.0)[i] = new_tt;
+ modified = true;
+ }
+ i += 1;
+ }
+ }
+ }
+ if modified { Some(stream) } else { None }
+ }
+
+ fn desugared_tts(attr_style: AttrStyle, data: Symbol, span: Span) -> Vec<TokenTree> {
+ // Searches for the occurrences of `"#*` and returns the minimum number of `#`s
+ // required to wrap the text. E.g.
+ // - `abc d` is wrapped as `r"abc d"` (num_of_hashes = 0)
+ // - `abc "d"` is wrapped as `r#"abc "d""#` (num_of_hashes = 1)
+ // - `abc "##d##"` is wrapped as `r###"abc ##"d"##"###` (num_of_hashes = 3)
+ let mut num_of_hashes = 0;
+ let mut count = 0;
+ for ch in data.as_str().chars() {
+ count = match ch {
+ '"' => 1,
+ '#' if count > 0 => count + 1,
+ _ => 0,
+ };
+ num_of_hashes = cmp::max(num_of_hashes, count);
+ }
+
+ // `/// foo` becomes `doc = r"foo"`.
+ let delim_span = DelimSpan::from_single(span);
+ let body = TokenTree::Delimited(
+ delim_span,
+ Delimiter::Bracket,
+ [
+ TokenTree::token_alone(token::Ident(sym::doc, false), span),
+ TokenTree::token_alone(token::Eq, span),
+ TokenTree::token_alone(
+ TokenKind::lit(token::StrRaw(num_of_hashes), data, None),
+ span,
+ ),
+ ]
+ .into_iter()
+ .collect::<TokenStream>(),
+ );
+
+ if attr_style == AttrStyle::Inner {
+ vec![
+ TokenTree::token_alone(token::Pound, span),
+ TokenTree::token_alone(token::Not, span),
+ body,
+ ]
+ } else {
+ vec![TokenTree::token_alone(token::Pound, span), body]
+ }
+ }
+ }
}
/// By-reference iterator over a [`TokenStream`], that produces `&TokenTree`
@@ -595,26 +683,21 @@ impl<'t> Iterator for RefTokenTreeCursor<'t> {
}
}
-/// Owning by-value iterator over a [`TokenStream`], that produces `TokenTree`
+/// Owning by-value iterator over a [`TokenStream`], that produces `&TokenTree`
/// items.
-// FIXME: Many uses of this can be replaced with by-reference iterator to avoid clones.
+///
+/// Doesn't impl `Iterator` because Rust doesn't permit an owning iterator to
+/// return `&T` from `next`; the need for an explicit lifetime in the `Item`
+/// associated type gets in the way. Instead, use `next_ref` (which doesn't
+/// involve associated types) for getting individual elements, or
+/// `RefTokenTreeCursor` if you really want an `Iterator`, e.g. in a `for`
+/// loop.
#[derive(Clone)]
pub struct TokenTreeCursor {
pub stream: TokenStream,
index: usize,
}
-impl Iterator for TokenTreeCursor {
- type Item = TokenTree;
-
- fn next(&mut self) -> Option<TokenTree> {
- self.stream.0.get(self.index).map(|tree| {
- self.index += 1;
- tree.clone()
- })
- }
-}
-
impl TokenTreeCursor {
fn new(stream: TokenStream) -> Self {
TokenTreeCursor { stream, index: 0 }
@@ -631,15 +714,6 @@ impl TokenTreeCursor {
pub fn look_ahead(&self, n: usize) -> Option<&TokenTree> {
self.stream.0.get(self.index + n)
}
-
- // Replace the previously obtained token tree with `tts`, and rewind to
- // just before them.
- pub fn replace_prev_and_rewind(&mut self, tts: Vec<TokenTree>) {
- assert!(self.index > 0);
- self.index -= 1;
- let stream = Lrc::make_mut(&mut self.stream.0);
- stream.splice(self.index..self.index + 1, tts);
- }
}
#[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
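The `num_of_hashes` loop in `desugared_tts` above computes the smallest number of `#`s such that a `"` followed by that many `#`s never occurs in the doc-comment text, which is exactly what makes the generated raw string literal well formed. The same scan extracted into a free function (name made up), checked against the examples from the comment:

fn required_hashes(data: &str) -> usize {
    let mut num_of_hashes = 0;
    let mut count = 0;
    for ch in data.chars() {
        count = match ch {
            '"' => 1,                      // a quote starts a potential closing sequence
            '#' if count > 0 => count + 1, // each following `#` lengthens it
            _ => 0,                        // anything else resets the run
        };
        num_of_hashes = num_of_hashes.max(count);
    }
    num_of_hashes
}

fn main() {
    assert_eq!(required_hashes(r#"abc d"#), 0);            // fits in r"abc d"
    assert_eq!(required_hashes(r#"abc "d""#), 1);          // needs r#"abc "d""#
    assert_eq!(required_hashes(r###"abc "##d##""###), 3);  // needs r###"..."###
}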
diff --git a/compiler/rustc_ast/src/util/comments.rs b/compiler/rustc_ast/src/util/comments.rs
index eece99a3e..bdf5143b0 100644
--- a/compiler/rustc_ast/src/util/comments.rs
+++ b/compiler/rustc_ast/src/util/comments.rs
@@ -62,7 +62,7 @@ pub fn beautify_doc_string(data: Symbol, kind: CommentKind) -> Symbol {
CommentKind::Block => {
// Whatever happens, we skip the first line.
let mut i = lines
- .get(0)
+ .first()
.map(|l| if l.trim_start().starts_with('*') { 0 } else { 1 })
.unwrap_or(0);
let mut j = lines.len();
diff --git a/compiler/rustc_ast/src/util/parser.rs b/compiler/rustc_ast/src/util/parser.rs
index 096077e09..d3e43e202 100644
--- a/compiler/rustc_ast/src/util/parser.rs
+++ b/compiler/rustc_ast/src/util/parser.rs
@@ -390,7 +390,7 @@ pub fn contains_exterior_struct_lit(value: &ast::Expr) -> bool {
| ast::ExprKind::Cast(x, _)
| ast::ExprKind::Type(x, _)
| ast::ExprKind::Field(x, _)
- | ast::ExprKind::Index(x, _) => {
+ | ast::ExprKind::Index(x, _, _) => {
// &X { y: 1 }, X { y: 1 }.y
contains_exterior_struct_lit(x)
}
diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs
index d9de5b8e1..6d474de2d 100644
--- a/compiler/rustc_ast/src/visit.rs
+++ b/compiler/rustc_ast/src/visit.rs
@@ -308,8 +308,12 @@ pub fn walk_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a Item) {
match &item.kind {
ItemKind::ExternCrate(_) => {}
ItemKind::Use(use_tree) => visitor.visit_use_tree(use_tree, item.id, false),
- ItemKind::Static(box StaticItem { ty, mutability: _, expr })
- | ItemKind::Const(box ConstItem { ty, expr, .. }) => {
+ ItemKind::Static(box StaticItem { ty, mutability: _, expr }) => {
+ visitor.visit_ty(ty);
+ walk_list!(visitor, visit_expr, expr);
+ }
+ ItemKind::Const(box ConstItem { defaultness: _, generics, ty, expr }) => {
+ visitor.visit_generics(generics);
visitor.visit_ty(ty);
walk_list!(visitor, visit_expr, expr);
}
@@ -677,7 +681,8 @@ pub fn walk_assoc_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a AssocItem,
visitor.visit_ident(ident);
walk_list!(visitor, visit_attribute, attrs);
match kind {
- AssocItemKind::Const(box ConstItem { ty, expr, .. }) => {
+ AssocItemKind::Const(box ConstItem { defaultness: _, generics, ty, expr }) => {
+ visitor.visit_generics(generics);
visitor.visit_ty(ty);
walk_list!(visitor, visit_expr, expr);
}
@@ -880,7 +885,7 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) {
visitor.visit_expr(subexpression);
visitor.visit_ident(*ident);
}
- ExprKind::Index(main_expression, index_expression) => {
+ ExprKind::Index(main_expression, index_expression, _) => {
visitor.visit_expr(main_expression);
visitor.visit_expr(index_expression)
}
diff --git a/compiler/rustc_ast_lowering/src/asm.rs b/compiler/rustc_ast_lowering/src/asm.rs
index d350498bc..a1e626996 100644
--- a/compiler/rustc_ast_lowering/src/asm.rs
+++ b/compiler/rustc_ast_lowering/src/asm.rs
@@ -207,6 +207,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
&sym.path,
ParamMode::Optional,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
);
hir::InlineAsmOperand::SymStatic { path, def_id }
} else {
@@ -352,7 +353,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let idx2 = *o.get();
let (ref op2, op_sp2) = operands[idx2];
- let Some(asm::InlineAsmRegOrRegClass::Reg(reg2)) = op2.reg() else {
+ let Some(asm::InlineAsmRegOrRegClass::Reg(reg2)) = op2.reg()
+ else {
unreachable!();
};
@@ -368,7 +370,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
assert!(!*late);
let out_op_sp = if input { op_sp2 } else { op_sp };
Some(out_op_sp)
- },
+ }
_ => None,
};
@@ -377,7 +379,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
op_span2: op_sp2,
reg1_name: reg.name(),
reg2_name: reg2.name(),
- in_out
+ in_out,
});
}
Entry::Vacant(v) => {
diff --git a/compiler/rustc_ast_lowering/src/errors.rs b/compiler/rustc_ast_lowering/src/errors.rs
index 72dc52a63..a63bd4f8a 100644
--- a/compiler/rustc_ast_lowering/src/errors.rs
+++ b/compiler/rustc_ast_lowering/src/errors.rs
@@ -31,9 +31,26 @@ pub struct InvalidAbi {
pub abi: Symbol,
pub command: String,
#[subdiagnostic]
+ pub explain: Option<InvalidAbiReason>,
+ #[subdiagnostic]
pub suggestion: Option<InvalidAbiSuggestion>,
}
+pub struct InvalidAbiReason(pub &'static str);
+
+impl rustc_errors::AddToDiagnostic for InvalidAbiReason {
+ fn add_to_diagnostic_with<F>(self, diag: &mut rustc_errors::Diagnostic, _: F)
+ where
+ F: Fn(
+ &mut rustc_errors::Diagnostic,
+ rustc_errors::SubdiagnosticMessage,
+ ) -> rustc_errors::SubdiagnosticMessage,
+ {
+ #[allow(rustc::untranslatable_diagnostic)]
+ diag.note(self.0);
+ }
+}
+
#[derive(Subdiagnostic)]
#[suggestion(
ast_lowering_invalid_abi_suggestion,
diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs
index dcaaaafed..7408b4fb0 100644
--- a/compiler/rustc_ast_lowering/src/expr.rs
+++ b/compiler/rustc_ast_lowering/src/expr.rs
@@ -100,6 +100,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
ParamMode::Optional,
ParenthesizedGenericArgs::Err,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
));
let receiver = self.lower_expr(receiver);
let args =
@@ -240,8 +241,8 @@ impl<'hir> LoweringContext<'_, 'hir> {
ExprKind::Field(el, ident) => {
hir::ExprKind::Field(self.lower_expr(el), self.lower_ident(*ident))
}
- ExprKind::Index(el, er) => {
- hir::ExprKind::Index(self.lower_expr(el), self.lower_expr(er))
+ ExprKind::Index(el, er, brackets_span) => {
+ hir::ExprKind::Index(self.lower_expr(el), self.lower_expr(er), *brackets_span)
}
ExprKind::Range(Some(e1), Some(e2), RangeLimits::Closed) => {
self.lower_expr_range_closed(e.span, e1, e2)
@@ -260,6 +261,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
path,
ParamMode::Optional,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
);
hir::ExprKind::Path(qpath)
}
@@ -286,7 +288,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
ExprKind::OffsetOf(container, fields) => hir::ExprKind::OffsetOf(
self.lower_ty(
container,
- &mut ImplTraitContext::Disallowed(ImplTraitPosition::OffsetOf),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::OffsetOf),
),
self.arena.alloc_from_iter(fields.iter().map(|&ident| self.lower_ident(ident))),
),
@@ -307,6 +309,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
&se.path,
ParamMode::Optional,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
)),
self.arena
.alloc_from_iter(se.fields.iter().map(|x| self.lower_expr_field(x))),
@@ -657,14 +660,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
/// Forwards a possible `#[track_caller]` annotation from `outer_hir_id` to
- /// `inner_hir_id` in case the `closure_track_caller` feature is enabled.
+ /// `inner_hir_id` in case the `async_fn_track_caller` feature is enabled.
pub(super) fn maybe_forward_track_caller(
&mut self,
span: Span,
outer_hir_id: hir::HirId,
inner_hir_id: hir::HirId,
) {
- if self.tcx.features().closure_track_caller
+ if self.tcx.features().async_fn_track_caller
&& let Some(attrs) = self.attrs.get(&outer_hir_id.local_id)
&& attrs.into_iter().any(|attr| attr.has_name(sym::track_caller))
{
@@ -1179,6 +1182,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
path,
ParamMode::Optional,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
);
// Destructure like a tuple struct.
let tuple_struct_pat = hir::PatKind::TupleStruct(
@@ -1198,6 +1202,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
path,
ParamMode::Optional,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
);
// Destructure like a unit struct.
let unit_struct_pat = hir::PatKind::Path(qpath);
@@ -1222,6 +1227,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
&se.path,
ParamMode::Optional,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
);
let fields_omitted = match &se.rest {
StructRest::Base(e) => {
@@ -1642,7 +1648,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
hir::ExprKind::Match(
scrutinee,
arena_vec![self; break_arm, continue_arm],
- hir::MatchSource::TryDesugar,
+ hir::MatchSource::TryDesugar(scrutinee.hir_id),
)
}
diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs
index ab68436c0..a59c83de0 100644
--- a/compiler/rustc_ast_lowering/src/item.rs
+++ b/compiler/rustc_ast_lowering/src/item.rs
@@ -1,4 +1,4 @@
-use super::errors::{InvalidAbi, InvalidAbiSuggestion, MisplacedRelaxTraitBound};
+use super::errors::{InvalidAbi, InvalidAbiReason, InvalidAbiSuggestion, MisplacedRelaxTraitBound};
use super::ResolverAstLoweringExt;
use super::{AstOwner, ImplTraitContext, ImplTraitPosition};
use super::{FnDeclKind, LoweringContext, ParamMode};
@@ -56,6 +56,11 @@ impl<'a, 'hir> ItemLowerer<'a, 'hir> {
owner: NodeId,
f: impl FnOnce(&mut LoweringContext<'_, 'hir>) -> hir::OwnerNode<'hir>,
) {
+ let allow_gen_future = Some(if self.tcx.features().async_fn_track_caller {
+ [sym::gen_future, sym::closure_track_caller][..].into()
+ } else {
+ [sym::gen_future][..].into()
+ });
let mut lctx = LoweringContext {
// Pseudo-globals.
tcx: self.tcx,
@@ -83,8 +88,9 @@ impl<'a, 'hir> ItemLowerer<'a, 'hir> {
impl_trait_defs: Vec::new(),
impl_trait_bounds: Vec::new(),
allow_try_trait: Some([sym::try_trait_v2, sym::yeet_desugar_details][..].into()),
- allow_gen_future: Some([sym::gen_future, sym::closure_track_caller][..].into()),
+ allow_gen_future,
generics_def_id_map: Default::default(),
+ host_param_id: None,
};
lctx.with_hir_id_owner(owner, |lctx| f(lctx));
@@ -139,8 +145,24 @@ impl<'a, 'hir> ItemLowerer<'a, 'hir> {
// This is used to track which lifetimes have already been defined,
// and which need to be replicated when lowering an async fn.
- if let hir::ItemKind::Impl(impl_) = parent_hir.node().expect_item().kind {
- lctx.is_in_trait_impl = impl_.of_trait.is_some();
+ match parent_hir.node().expect_item().kind {
+ hir::ItemKind::Impl(impl_) => {
+ lctx.is_in_trait_impl = impl_.of_trait.is_some();
+ }
+ hir::ItemKind::Trait(_, _, generics, _, _) if lctx.tcx.features().effects => {
+ lctx.host_param_id = generics
+ .params
+ .iter()
+ .find(|param| {
+ parent_hir
+ .attrs
+ .get(param.hir_id.local_id)
+ .iter()
+ .any(|attr| attr.has_name(sym::rustc_host))
+ })
+ .map(|param| param.def_id);
+ }
+ _ => {}
}
match ctxt {
@@ -231,9 +253,15 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (ty, body_id) = self.lower_const_item(t, span, e.as_deref());
hir::ItemKind::Static(ty, *m, body_id)
}
- ItemKind::Const(box ast::ConstItem { ty, expr, .. }) => {
- let (ty, body_id) = self.lower_const_item(ty, span, expr.as_deref());
- hir::ItemKind::Const(ty, body_id)
+ ItemKind::Const(box ast::ConstItem { generics, ty, expr, .. }) => {
+ let (generics, (ty, body_id)) = self.lower_generics(
+ generics,
+ Const::No,
+ id,
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.lower_const_item(ty, span, expr.as_deref()),
+ );
+ hir::ItemKind::Const(ty, generics, body_id)
}
ItemKind::Fn(box Fn {
sig: FnSig { decl, header, span: fn_sig_span },
@@ -378,6 +406,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.lower_generics(ast_generics, *constness, id, &itctx, |this| {
let trait_ref = trait_ref.as_ref().map(|trait_ref| {
this.lower_trait_ref(
+ *constness,
trait_ref,
&ImplTraitContext::Disallowed(ImplTraitPosition::Trait),
)
@@ -408,7 +437,6 @@ impl<'hir> LoweringContext<'_, 'hir> {
polarity,
defaultness,
defaultness_span,
- constness: self.lower_constness(*constness),
generics,
of_trait: trait_ref,
self_ty: lowered_ty,
@@ -551,17 +579,6 @@ impl<'hir> LoweringContext<'_, 'hir> {
for &(ref use_tree, id) in trees {
let new_hir_id = self.local_def_id(id);
- let mut prefix = prefix.clone();
-
- // Give the segments new node-ids since they are being cloned.
- for seg in &mut prefix.segments {
- // Give the cloned segment the same resolution information
- // as the old one (this is needed for stability checking).
- let new_id = self.next_node_id();
- self.resolver.clone_res(seg.id, new_id);
- seg.id = new_id;
- }
-
// Each `use` import is an item and thus are owners of the
// names in the path. Up to this point the nested import is
// the current owner, since we want each desugared import to
@@ -570,6 +587,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.with_hir_id_owner(id, |this| {
let mut ident = *ident;
+ // `prefix` is lowered multiple times, but in different HIR owners.
+ // So each segment gets a renewed `HirId` with the same
+ // `ItemLocalId` and the new owner. (See `lower_node_id`.)
let kind =
this.lower_use_tree(use_tree, &prefix, id, vis_span, &mut ident, attrs);
if let Some(attrs) = attrs {
@@ -723,11 +743,23 @@ impl<'hir> LoweringContext<'_, 'hir> {
let trait_item_def_id = hir_id.expect_owner();
let (generics, kind, has_default) = match &i.kind {
- AssocItemKind::Const(box ConstItem { ty, expr, .. }) => {
- let ty =
- self.lower_ty(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::ConstTy));
- let body = expr.as_ref().map(|x| self.lower_const_body(i.span, Some(x)));
- (hir::Generics::empty(), hir::TraitItemKind::Const(ty, body), body.is_some())
+ AssocItemKind::Const(box ConstItem { generics, ty, expr, .. }) => {
+ let (generics, kind) = self.lower_generics(
+ &generics,
+ Const::No,
+ i.id,
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ let ty = this.lower_ty(
+ ty,
+ &ImplTraitContext::Disallowed(ImplTraitPosition::ConstTy),
+ );
+ let body = expr.as_ref().map(|x| this.lower_const_body(i.span, Some(x)));
+
+ hir::TraitItemKind::Const(ty, body)
+ },
+ );
+ (generics, kind, expr.is_some())
}
AssocItemKind::Fn(box Fn { sig, generics, body: None, .. }) => {
let asyncness = sig.header.asyncness;
@@ -825,14 +857,19 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.lower_attrs(hir_id, &i.attrs);
let (generics, kind) = match &i.kind {
- AssocItemKind::Const(box ConstItem { ty, expr, .. }) => {
- let ty =
- self.lower_ty(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::ConstTy));
- (
- hir::Generics::empty(),
- hir::ImplItemKind::Const(ty, self.lower_const_body(i.span, expr.as_deref())),
- )
- }
+ AssocItemKind::Const(box ConstItem { generics, ty, expr, .. }) => self.lower_generics(
+ &generics,
+ Const::No,
+ i.id,
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ let ty = this
+ .lower_ty(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::ConstTy));
+ let body = this.lower_const_body(i.span, expr.as_deref());
+
+ hir::ImplItemKind::Const(ty, body)
+ },
+ ),
AssocItemKind::Fn(box Fn { sig, generics, body, .. }) => {
self.current_item = Some(i.span);
let asyncness = sig.header.asyncness;
@@ -1234,8 +1271,8 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
pub(super) fn lower_abi(&mut self, abi: StrLit) -> abi::Abi {
- abi::lookup(abi.symbol_unescaped.as_str()).unwrap_or_else(|| {
- self.error_on_invalid_abi(abi);
+ abi::lookup(abi.symbol_unescaped.as_str()).unwrap_or_else(|err| {
+ self.error_on_invalid_abi(abi, err);
abi::Abi::Rust
})
}
@@ -1248,7 +1285,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
}
- fn error_on_invalid_abi(&self, abi: StrLit) {
+ fn error_on_invalid_abi(&self, abi: StrLit, err: abi::AbiUnsupported) {
let abi_names = abi::enabled_names(self.tcx.features(), abi.span)
.iter()
.map(|s| Symbol::intern(s))
@@ -1257,6 +1294,10 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.tcx.sess.emit_err(InvalidAbi {
abi: abi.symbol_unescaped,
span: abi.span,
+ explain: match err {
+ abi::AbiUnsupported::Reason { explain } => Some(InvalidAbiReason(explain)),
+ _ => None,
+ },
suggestion: suggested_name.map(|suggested_name| InvalidAbiSuggestion {
span: abi.span,
suggestion: format!("\"{suggested_name}\""),
@@ -1343,6 +1384,29 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
}
+ // Desugar `~const` bound in generics into an additional `const host: bool` param
+ // if the effects feature is enabled. This needs to be done before we lower where
+ // clauses since where clauses need to bind to the DefId of the host param
+ let host_param_parts = if let Const::Yes(span) = constness && self.tcx.features().effects {
+ if let Some(param) = generics.params.iter().find(|x| {
+ x.attrs.iter().any(|x| x.has_name(sym::rustc_host))
+ }) {
+ // The user has manually specified a `rustc_host` param; in this case, we set
+ // the param id so that lowering logic can use it, but we don't create
+ // another host param, so this gives `None`.
+ self.host_param_id = Some(self.local_def_id(param.id));
+ None
+ } else {
+ let param_node_id = self.next_node_id();
+ let hir_id = self.next_id();
+ let def_id = self.create_def(self.local_def_id(parent_node_id), param_node_id, DefPathData::TypeNs(sym::host), span);
+ self.host_param_id = Some(def_id);
+ Some((span, hir_id, def_id))
+ }
+ } else {
+ None
+ };
+
let mut predicates: SmallVec<[hir::WherePredicate<'hir>; 4]> = SmallVec::new();
predicates.extend(generics.params.iter().filter_map(|param| {
self.lower_generic_bound_predicate(
@@ -1390,22 +1454,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
let impl_trait_bounds = std::mem::take(&mut self.impl_trait_bounds);
predicates.extend(impl_trait_bounds.into_iter());
- // Desugar `~const` bound in generics into an additional `const host: bool` param
- // if the effects feature is enabled.
- if let Const::Yes(span) = constness && self.tcx.features().effects
- // Do not add host param if it already has it (manually specified)
- && !params.iter().any(|x| {
- self.attrs.get(&x.hir_id.local_id).map_or(false, |attrs| {
- attrs.iter().any(|x| x.has_name(sym::rustc_host))
- })
- })
- {
- let param_node_id = self.next_node_id();
+ if let Some((span, hir_id, def_id)) = host_param_parts {
let const_node_id = self.next_node_id();
- let def_id = self.create_def(self.local_def_id(parent_node_id), param_node_id, DefPathData::TypeNs(sym::host), span);
- let anon_const: LocalDefId = self.create_def(def_id, const_node_id, DefPathData::AnonConst, span);
+ let anon_const: LocalDefId =
+ self.create_def(def_id, const_node_id, DefPathData::AnonConst, span);
- let hir_id = self.next_id();
let const_id = self.next_id();
let const_expr_id = self.next_id();
let bool_id = self.next_id();
@@ -1415,14 +1468,15 @@ impl<'hir> LoweringContext<'_, 'hir> {
let attr_id = self.tcx.sess.parse_sess.attr_id_generator.mk_attr_id();
- let attrs = self.arena.alloc_from_iter([
- Attribute {
- kind: AttrKind::Normal(P(NormalAttr::from_ident(Ident::new(sym::rustc_host, span)))),
+ let attrs = self.arena.alloc_from_iter([Attribute {
+ kind: AttrKind::Normal(P(NormalAttr::from_ident(Ident::new(
+ sym::rustc_host,
span,
- id: attr_id,
- style: AttrStyle::Outer,
- },
- ]);
+ )))),
+ span,
+ id: attr_id,
+ style: AttrStyle::Outer,
+ }]);
self.attrs.insert(hir_id.local_id, attrs);
let const_body = self.lower_body(|this| {
@@ -1461,7 +1515,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
}),
)),
)),
- default: Some(hir::AnonConst { def_id: anon_const, hir_id: const_id, body: const_body }),
+ default: Some(hir::AnonConst {
+ def_id: anon_const,
+ hir_id: const_id,
+ body: const_body,
+ }),
},
colon_span: None,
pure_wrt_drop: false,
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index 429e62c4a..4a47de128 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -142,16 +142,14 @@ struct LoweringContext<'a, 'hir> {
/// defined on the TAIT, so we have type Foo<'a1> = ... and we establish a mapping in this
/// field from the original parameter 'a to the new parameter 'a1.
generics_def_id_map: Vec<FxHashMap<LocalDefId, LocalDefId>>,
+
+ host_param_id: Option<LocalDefId>,
}
trait ResolverAstLoweringExt {
fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>>;
fn get_partial_res(&self, id: NodeId) -> Option<PartialRes>;
fn get_import_res(&self, id: NodeId) -> PerNS<Option<Res<NodeId>>>;
- // Clones the resolution (if any) on 'source' and applies it
- // to 'target'. Used when desugaring a `UseTreeKind::Nested` to
- // multiple `UseTreeKind::Simple`s
- fn clone_res(&mut self, source: NodeId, target: NodeId);
fn get_label_res(&self, id: NodeId) -> Option<NodeId>;
fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes>;
fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)>;
@@ -184,12 +182,6 @@ impl ResolverAstLoweringExt for ResolverAstLowering {
None
}
- fn clone_res(&mut self, source: NodeId, target: NodeId) {
- if let Some(res) = self.partial_res_map.get(&source) {
- self.partial_res_map.insert(target, *res);
- }
- }
-
/// Obtains resolution for a `NodeId` with a single resolution.
fn get_partial_res(&self, id: NodeId) -> Option<PartialRes> {
self.partial_res_map.get(&id).copied()
@@ -465,7 +457,7 @@ pub fn lower_to_hir(tcx: TyCtxt<'_>, (): ()) -> hir::Crate<'_> {
// Don't hash unless necessary, because it's expensive.
let opt_hir_hash =
- if tcx.sess.needs_crate_hash() { Some(compute_hir_hash(tcx, &owners)) } else { None };
+ if tcx.needs_crate_hash() { Some(compute_hir_hash(tcx, &owners)) } else { None };
hir::Crate { owners, opt_hir_hash }
}
@@ -522,11 +514,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
self.resolver.node_id_to_def_id.get(&node).map(|local_def_id| *local_def_id)
}
- fn orig_local_def_id(&self, node: NodeId) -> LocalDefId {
- self.orig_opt_local_def_id(node)
- .unwrap_or_else(|| panic!("no entry for node id: `{node:?}`"))
- }
-
/// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
/// resolver (if any), after applying any remapping from `get_remapped_def_id`.
///
@@ -661,7 +648,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let bodies = SortedMap::from_presorted_elements(bodies);
// Don't hash unless necessary, because it's expensive.
- let (opt_hash_including_bodies, attrs_hash) = if self.tcx.sess.needs_crate_hash() {
+ let (opt_hash_including_bodies, attrs_hash) = if self.tcx.needs_crate_hash() {
self.tcx.with_stable_hashing_context(|mut hcx| {
let mut stable_hasher = StableHasher::new();
hcx.with_hir_bodies(node.def_id(), &bodies, |hcx| {
@@ -1277,6 +1264,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
span: t.span
},
itctx,
+ ast::Const::No,
);
let bounds = this.arena.alloc_from_iter([bound]);
let lifetime_bound = this.elided_dyn_bound(t.span);
@@ -1287,7 +1275,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
let id = self.lower_node_id(t.id);
- let qpath = self.lower_qpath(t.id, qself, path, param_mode, itctx);
+ let qpath = self.lower_qpath(t.id, qself, path, param_mode, itctx, None);
self.ty_path(id, t.span, qpath)
}
@@ -1371,10 +1359,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
this.arena.alloc_from_iter(bounds.iter().filter_map(|bound| match bound {
GenericBound::Trait(
ty,
- TraitBoundModifier::None
+ modifier @ (TraitBoundModifier::None
| TraitBoundModifier::MaybeConst
- | TraitBoundModifier::Negative,
- ) => Some(this.lower_poly_trait_ref(ty, itctx)),
+ | TraitBoundModifier::Negative),
+ ) => {
+ Some(this.lower_poly_trait_ref(ty, itctx, modifier.to_constness()))
+ }
// `~const ?Bound` will cause an error during AST validation
// anyways, so treat it like `?Bound` as compilation proceeds.
GenericBound::Trait(
@@ -1531,207 +1521,86 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// frequently opened issues show.
let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::OpaqueTy, span, None);
- let opaque_ty_def_id = self.create_def(
- self.current_hir_id_owner.def_id,
- opaque_ty_node_id,
- DefPathData::ImplTrait,
- opaque_ty_span,
- );
- debug!(?opaque_ty_def_id);
-
- // If this came from a TAIT (as opposed to a function that returns an RPIT), we only want
- // to capture the lifetimes that appear in the bounds. So visit the bounds to find out
- // exactly which ones those are.
- let lifetimes_to_remap = match origin {
+ let captured_lifetimes_to_duplicate = match origin {
hir::OpaqueTyOrigin::TyAlias { .. } => {
- // in a TAIT like `type Foo<'a> = impl Foo<'a>`, we don't keep all the lifetime parameters
+ // in a TAIT like `type Foo<'a> = impl Foo<'a>`, we don't duplicate any
+ // lifetimes, since we don't have the issue that any are late-bound.
Vec::new()
}
- hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..) => {
- // in fn return position, like the `fn test<'a>() -> impl Debug + 'a` example,
- // we only keep the lifetimes that appear in the `impl Debug` itself:
+ hir::OpaqueTyOrigin::FnReturn(..) => {
+ // in fn return position, like the `fn test<'a>() -> impl Debug + 'a`
+ // example, we only need to duplicate lifetimes that appear in the
+ // bounds, since those are the only ones that are captured by the opaque.
lifetime_collector::lifetimes_in_bounds(&self.resolver, bounds)
}
+ hir::OpaqueTyOrigin::AsyncFn(..) => {
+ unreachable!("should be using `lower_async_fn_ret_ty`")
+ }
};
- debug!(?lifetimes_to_remap);
-
- let mut new_remapping = FxHashMap::default();
-
- // Contains the new lifetime definitions created for the TAIT (if any).
- // If this opaque type is only capturing a subset of the lifetimes (those that appear in
- // bounds), then create the new lifetime parameters required and create a mapping from the
- // old `'a` (on the function) to the new `'a` (on the opaque type).
- let collected_lifetimes =
- self.create_lifetime_defs(opaque_ty_def_id, &lifetimes_to_remap, &mut new_remapping);
- debug!(?collected_lifetimes);
- debug!(?new_remapping);
-
- // This creates HIR lifetime arguments as `hir::GenericArg`, in the given example `type
- // TestReturn<'a, T, 'x> = impl Debug + 'x`, it creates a collection containing `&['x]`.
- let collected_lifetime_mapping: Vec<_> = collected_lifetimes
- .iter()
- .map(|(node_id, lifetime)| {
- let id = self.next_node_id();
- let lifetime = self.new_named_lifetime(lifetime.id, id, lifetime.ident);
- let def_id = self.local_def_id(*node_id);
- (lifetime, def_id)
- })
- .collect();
- debug!(?collected_lifetime_mapping);
-
- self.with_hir_id_owner(opaque_ty_node_id, |lctx| {
- // Install the remapping from old to new (if any):
- lctx.with_remapping(new_remapping, |lctx| {
- // This creates HIR lifetime definitions as `hir::GenericParam`, in the given
- // example `type TestReturn<'a, T, 'x> = impl Debug + 'x`, it creates a collection
- // containing `&['x]`.
- let lifetime_defs = lctx.arena.alloc_from_iter(collected_lifetimes.iter().map(
- |&(new_node_id, lifetime)| {
- let hir_id = lctx.lower_node_id(new_node_id);
- debug_assert_ne!(lctx.opt_local_def_id(new_node_id), None);
-
- let (name, kind) = if lifetime.ident.name == kw::UnderscoreLifetime {
- (hir::ParamName::Fresh, hir::LifetimeParamKind::Elided)
- } else {
- (
- hir::ParamName::Plain(lifetime.ident),
- hir::LifetimeParamKind::Explicit,
- )
- };
+ debug!(?captured_lifetimes_to_duplicate);
- hir::GenericParam {
- hir_id,
- def_id: lctx.local_def_id(new_node_id),
- name,
- span: lifetime.ident.span,
- pure_wrt_drop: false,
- kind: hir::GenericParamKind::Lifetime { kind },
- colon_span: None,
- source: hir::GenericParamSource::Generics,
- }
- },
- ));
- debug!(?lifetime_defs);
-
- // Then when we lower the param bounds, references to 'a are remapped to 'a1, so we
- // get back Debug + 'a1, which is suitable for use on the TAIT.
- let hir_bounds = lctx.lower_param_bounds(bounds, itctx);
- debug!(?hir_bounds);
-
- let lifetime_mapping = if in_trait {
- self.arena.alloc_from_iter(
- collected_lifetime_mapping
- .iter()
- .map(|(lifetime, def_id)| (**lifetime, *def_id)),
- )
- } else {
- &mut []
- };
-
- let opaque_ty_item = hir::OpaqueTy {
- generics: self.arena.alloc(hir::Generics {
- params: lifetime_defs,
- predicates: &[],
- has_where_clause_predicates: false,
- where_clause_span: lctx.lower_span(span),
- span: lctx.lower_span(span),
- }),
- bounds: hir_bounds,
- origin,
- lifetime_mapping,
- in_trait,
- };
- debug!(?opaque_ty_item);
-
- lctx.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span)
- })
- });
-
- // `impl Trait` now just becomes `Foo<'a, 'b, ..>`.
- hir::TyKind::OpaqueDef(
- hir::ItemId { owner_id: hir::OwnerId { def_id: opaque_ty_def_id } },
- self.arena.alloc_from_iter(
- collected_lifetime_mapping
- .iter()
- .map(|(lifetime, _)| hir::GenericArg::Lifetime(*lifetime)),
- ),
+ self.lower_opaque_inner(
+ opaque_ty_node_id,
+ origin,
in_trait,
+ captured_lifetimes_to_duplicate,
+ span,
+ opaque_ty_span,
+ |this| this.lower_param_bounds(bounds, itctx),
)
}
- /// Registers a new opaque type with the proper `NodeId`s and
- /// returns the lowered node-ID for the opaque type.
- fn generate_opaque_type(
+ fn lower_opaque_inner(
&mut self,
- opaque_ty_id: LocalDefId,
- opaque_ty_item: hir::OpaqueTy<'hir>,
+ opaque_ty_node_id: NodeId,
+ origin: hir::OpaqueTyOrigin,
+ in_trait: bool,
+ captured_lifetimes_to_duplicate: Vec<Lifetime>,
span: Span,
opaque_ty_span: Span,
- ) -> hir::OwnerNode<'hir> {
- let opaque_ty_item_kind = hir::ItemKind::OpaqueTy(self.arena.alloc(opaque_ty_item));
- // Generate an `type Foo = impl Trait;` declaration.
- trace!("registering opaque type with id {:#?}", opaque_ty_id);
- let opaque_ty_item = hir::Item {
- owner_id: hir::OwnerId { def_id: opaque_ty_id },
- ident: Ident::empty(),
- kind: opaque_ty_item_kind,
- vis_span: self.lower_span(span.shrink_to_lo()),
- span: self.lower_span(opaque_ty_span),
- };
- hir::OwnerNode::Item(self.arena.alloc(opaque_ty_item))
- }
-
- /// Given a `parent_def_id`, a list of `lifetimes_in_bounds` and a `remapping` hash to be
- /// filled, this function creates new definitions for `Param` and `Fresh` lifetimes, inserts the
- /// new definition, adds it to the remapping with the definition of the given lifetime and
- /// returns a list of lifetimes to be lowered afterwards.
- fn create_lifetime_defs(
- &mut self,
- parent_def_id: LocalDefId,
- lifetimes_in_bounds: &[Lifetime],
- remapping: &mut FxHashMap<LocalDefId, LocalDefId>,
- ) -> Vec<(NodeId, Lifetime)> {
- let mut result = Vec::new();
+ lower_item_bounds: impl FnOnce(&mut Self) -> &'hir [hir::GenericBound<'hir>],
+ ) -> hir::TyKind<'hir> {
+ let opaque_ty_def_id = self.create_def(
+ self.current_hir_id_owner.def_id,
+ opaque_ty_node_id,
+ DefPathData::ImplTrait,
+ opaque_ty_span,
+ );
+ debug!(?opaque_ty_def_id);
- for lifetime in lifetimes_in_bounds {
+ // Map from captured (old) lifetime to synthetic (new) lifetime.
+ // Used to resolve lifetimes in the bounds of the opaque.
+ let mut captured_to_synthesized_mapping = FxHashMap::default();
+ // List of (early-bound) synthetic lifetimes that are owned by the opaque.
+ // This is used to create the `hir::Generics` owned by the opaque.
+ let mut synthesized_lifetime_definitions = vec![];
+ // Pairs of lifetime arg (that resolves to the captured lifetime)
+ // and the def-id of the (early-bound) synthetic lifetime definition.
+ // This is used both to create generics for the `TyKind::OpaqueDef` that
+ // we return, and also as a captured lifetime mapping for RPITITs.
+ let mut synthesized_lifetime_args = vec![];
+
+ for lifetime in captured_lifetimes_to_duplicate {
let res = self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error);
- debug!(?res);
-
- match res {
- LifetimeRes::Param { param: old_def_id, binder: _ } => {
- if remapping.get(&old_def_id).is_none() {
- let node_id = self.next_node_id();
-
- let new_def_id = self.create_def(
- parent_def_id,
- node_id,
- DefPathData::LifetimeNs(lifetime.ident.name),
- lifetime.ident.span,
- );
- remapping.insert(old_def_id, new_def_id);
-
- result.push((node_id, *lifetime));
- }
- }
+ let old_def_id = match res {
+ LifetimeRes::Param { param: old_def_id, binder: _ } => old_def_id,
LifetimeRes::Fresh { param, binder: _ } => {
debug_assert_eq!(lifetime.ident.name, kw::UnderscoreLifetime);
- if let Some(old_def_id) = self.orig_opt_local_def_id(param) && remapping.get(&old_def_id).is_none() {
- let node_id = self.next_node_id();
-
- let new_def_id = self.create_def(
- parent_def_id,
- node_id,
- DefPathData::LifetimeNs(kw::UnderscoreLifetime),
- lifetime.ident.span,
- );
- remapping.insert(old_def_id, new_def_id);
-
- result.push((node_id, *lifetime));
+ if let Some(old_def_id) = self.orig_opt_local_def_id(param) {
+ old_def_id
+ } else {
+ self.tcx
+ .sess
+ .delay_span_bug(lifetime.ident.span, "no def-id for fresh lifetime");
+ continue;
}
}
- LifetimeRes::Static | LifetimeRes::Error => {}
+ // Opaques do not capture `'static`
+ LifetimeRes::Static | LifetimeRes::Error => {
+ continue;
+ }
res => {
let bug_msg = format!(
@@ -1740,10 +1609,109 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
);
span_bug!(lifetime.ident.span, "{}", bug_msg);
}
+ };
+
+ if captured_to_synthesized_mapping.get(&old_def_id).is_none() {
+ // Create a new lifetime parameter local to the opaque.
+ let duplicated_lifetime_node_id = self.next_node_id();
+ let duplicated_lifetime_def_id = self.create_def(
+ opaque_ty_def_id,
+ duplicated_lifetime_node_id,
+ DefPathData::LifetimeNs(lifetime.ident.name),
+ lifetime.ident.span,
+ );
+ captured_to_synthesized_mapping.insert(old_def_id, duplicated_lifetime_def_id);
+ // FIXME: Instead of doing this, we could move this whole loop
+ // into the `with_hir_id_owner`, then just directly construct
+ // the `hir::GenericParam` here.
+ synthesized_lifetime_definitions.push((
+ duplicated_lifetime_node_id,
+ duplicated_lifetime_def_id,
+ lifetime.ident,
+ ));
+
+ // Now make an arg that we can use for the generic args of the opaque's `TyKind::OpaqueDef`.
+ let id = self.next_node_id();
+ let lifetime_arg = self.new_named_lifetime_with_res(id, lifetime.ident, res);
+ let duplicated_lifetime_def_id = self.local_def_id(duplicated_lifetime_node_id);
+ synthesized_lifetime_args.push((lifetime_arg, duplicated_lifetime_def_id))
}
}
- result
+ self.with_hir_id_owner(opaque_ty_node_id, |this| {
+ // Install the remapping from old to new (if any). This makes sure that
+ // any lifetimes that would have resolved to the def-id of captured
+ // lifetimes are remapped to the new *synthetic* lifetimes of the opaque.
+ let bounds = this
+ .with_remapping(captured_to_synthesized_mapping, |this| lower_item_bounds(this));
+
+ let generic_params = this.arena.alloc_from_iter(
+ synthesized_lifetime_definitions.iter().map(|&(new_node_id, new_def_id, ident)| {
+ let hir_id = this.lower_node_id(new_node_id);
+ let (name, kind) = if ident.name == kw::UnderscoreLifetime {
+ (hir::ParamName::Fresh, hir::LifetimeParamKind::Elided)
+ } else {
+ (hir::ParamName::Plain(ident), hir::LifetimeParamKind::Explicit)
+ };
+
+ hir::GenericParam {
+ hir_id,
+ def_id: new_def_id,
+ name,
+ span: ident.span,
+ pure_wrt_drop: false,
+ kind: hir::GenericParamKind::Lifetime { kind },
+ colon_span: None,
+ source: hir::GenericParamSource::Generics,
+ }
+ }),
+ );
+ debug!("lower_async_fn_ret_ty: generic_params={:#?}", generic_params);
+
+ let lifetime_mapping = self.arena.alloc_slice(&synthesized_lifetime_args);
+
+ let opaque_ty_item = hir::OpaqueTy {
+ generics: this.arena.alloc(hir::Generics {
+ params: generic_params,
+ predicates: &[],
+ has_where_clause_predicates: false,
+ where_clause_span: this.lower_span(span),
+ span: this.lower_span(span),
+ }),
+ bounds,
+ origin,
+ lifetime_mapping,
+ in_trait,
+ };
+
+ // Generate a `type Foo = impl Trait;` declaration.
+ trace!("registering opaque type with id {:#?}", opaque_ty_def_id);
+ let opaque_ty_item = hir::Item {
+ owner_id: hir::OwnerId { def_id: opaque_ty_def_id },
+ ident: Ident::empty(),
+ kind: hir::ItemKind::OpaqueTy(this.arena.alloc(opaque_ty_item)),
+ vis_span: this.lower_span(span.shrink_to_lo()),
+ span: this.lower_span(opaque_ty_span),
+ };
+
+ hir::OwnerNode::Item(this.arena.alloc(opaque_ty_item))
+ });
+
+ let generic_args = self.arena.alloc_from_iter(
+ synthesized_lifetime_args
+ .iter()
+ .map(|(lifetime, _)| hir::GenericArg::Lifetime(*lifetime)),
+ );
+
+ // Create the `Foo<...>` reference itself. Note that the `type
+ // Foo = impl Trait` is, internally, created as a child of the
+ // async fn, so the *type parameters* are inherited. It's
+ // only the lifetime parameters that we must supply.
+ hir::TyKind::OpaqueDef(
+ hir::ItemId { owner_id: hir::OwnerId { def_id: opaque_ty_def_id } },
+ generic_args,
+ in_trait,
+ )
}
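For orientation, here is the capture rule that `lower_opaque_inner` now centralizes, shown on plain user-facing Rust (compiles on stable; the comments describe the lowering only informally, not the exact HIR):

```rust
use std::fmt::Debug;

// Only `'a` appears in the opaque's bounds, so conceptually the opaque type
// gets a single synthetic early-bound lifetime (think `Opaque<'a1>`), and
// every use of `'a` inside the bounds is remapped to that `'a1`. `'b` is not
// captured because it does not appear in `impl Debug + 'a`.
fn test<'a, 'b>(x: &'a u32, _y: &'b u32) -> impl Debug + 'a {
    x
}

fn main() {
    let a = 1u32;
    let b = 2u32;
    println!("{:?}", test(&a, &b));
}
```

A TAIT such as `type Foo<'a> = impl Debug + 'a;` duplicates nothing, matching the `TyAlias` arm above.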
fn lower_fn_params_to_names(&mut self, decl: &FnDecl) -> &'hir [Ident] {
@@ -1821,9 +1789,10 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
}
+ let fn_def_id = self.local_def_id(fn_node_id);
self.lower_async_fn_ret_ty(
&decl.output,
- fn_node_id,
+ fn_def_id,
ret_id,
matches!(kind, FnDeclKind::Trait),
)
@@ -1900,151 +1869,28 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_async_fn_ret_ty(
&mut self,
output: &FnRetTy,
- fn_node_id: NodeId,
+ fn_def_id: LocalDefId,
opaque_ty_node_id: NodeId,
in_trait: bool,
) -> hir::FnRetTy<'hir> {
- let span = output.span();
-
+ let span = self.lower_span(output.span());
let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::Async, span, None);
- let fn_def_id = self.local_def_id(fn_node_id);
-
- let opaque_ty_def_id =
- self.create_def(fn_def_id, opaque_ty_node_id, DefPathData::ImplTrait, opaque_ty_span);
-
- // When we create the opaque type for this async fn, it is going to have
- // to capture all the lifetimes involved in the signature (including in the
- // return type). This is done by introducing lifetime parameters for:
- //
- // - all the explicitly declared lifetimes from the impl and function itself;
- // - all the elided lifetimes in the fn arguments;
- // - all the elided lifetimes in the return type.
- //
- // So for example in this snippet:
- //
- // ```rust
- // impl<'a> Foo<'a> {
- // async fn bar<'b>(&self, x: &'b Vec<f64>, y: &str) -> &u32 {
- // // ^ '0 ^ '1 ^ '2
- // // elided lifetimes used below
- // }
- // }
- // ```
- //
- // we would create an opaque type like:
- //
- // ```
- // type Bar<'a, 'b, '0, '1, '2> = impl Future<Output = &'2 u32>;
- // ```
- //
- // and we would then desugar `bar` to the equivalent of:
- //
- // ```rust
- // impl<'a> Foo<'a> {
- // fn bar<'b, '0, '1>(&'0 self, x: &'b Vec<f64>, y: &'1 str) -> Bar<'a, 'b, '0, '1, '_>
- // }
- // ```
- //
- // Note that the final parameter to `Bar` is `'_`, not `'2` --
- // this is because the elided lifetimes from the return type
- // should be figured out using the ordinary elision rules, and
- // this desugaring achieves that.
-
- // Calculate all the lifetimes that should be captured
- // by the opaque type. This should include all in-scope
- // lifetime parameters, including those defined in-band.
-
- // Contains the new lifetime definitions created for the TAIT (if any) generated for the
- // return type.
- let mut collected_lifetimes = Vec::new();
- let mut new_remapping = FxHashMap::default();
-
- let extra_lifetime_params = self.resolver.take_extra_lifetime_params(opaque_ty_node_id);
- debug!(?extra_lifetime_params);
- for (ident, outer_node_id, outer_res) in extra_lifetime_params {
- let outer_def_id = self.orig_local_def_id(outer_node_id);
- let inner_node_id = self.next_node_id();
-
- // Add a definition for the in scope lifetime def.
- let inner_def_id = self.create_def(
- opaque_ty_def_id,
- inner_node_id,
- DefPathData::LifetimeNs(ident.name),
- ident.span,
- );
- new_remapping.insert(outer_def_id, inner_def_id);
-
- let inner_res = match outer_res {
- // Input lifetime like `'a`:
- LifetimeRes::Param { param, .. } => {
- LifetimeRes::Param { param, binder: fn_node_id }
- }
- // Input lifetime like `'1`:
- LifetimeRes::Fresh { param, .. } => {
- LifetimeRes::Fresh { param, binder: fn_node_id }
- }
- LifetimeRes::Static | LifetimeRes::Error => continue,
- res => {
- panic!(
- "Unexpected lifetime resolution {:?} for {:?} at {:?}",
- res, ident, ident.span
- )
- }
- };
-
- let lifetime = Lifetime { id: outer_node_id, ident };
- collected_lifetimes.push((inner_node_id, lifetime, Some(inner_res)));
- }
- debug!(?collected_lifetimes);
-
- // We only want to capture the lifetimes that appear in the bounds. So visit the bounds to
- // find out exactly which ones those are.
- // in fn return position, like the `fn test<'a>() -> impl Debug + 'a` example,
- // we only keep the lifetimes that appear in the `impl Debug` itself:
- let lifetimes_to_remap = lifetime_collector::lifetimes_in_ret_ty(&self.resolver, output);
- debug!(?lifetimes_to_remap);
-
- // If this opaque type is only capturing a subset of the lifetimes (those that appear in
- // bounds), then create the new lifetime parameters required and create a mapping from the
- // old `'a` (on the function) to the new `'a` (on the opaque type).
- collected_lifetimes.extend(
- self.create_lifetime_defs(opaque_ty_def_id, &lifetimes_to_remap, &mut new_remapping)
- .into_iter()
- .map(|(new_node_id, lifetime)| (new_node_id, lifetime, None)),
- );
- debug!(?collected_lifetimes);
- debug!(?new_remapping);
-
- // This creates pairs of HIR lifetimes and def_ids. In the given example `type
- // TestReturn<'a, T, 'x> = impl Debug + 'x`, it creates a collection containing the
- // new lifetime of the RPIT 'x and the def_id of the lifetime 'x corresponding to
- // `TestReturn`.
- let collected_lifetime_mapping: Vec<_> = collected_lifetimes
- .iter()
- .map(|(node_id, lifetime, res)| {
- let id = self.next_node_id();
- let res = res.unwrap_or(
- self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error),
- );
- let lifetime = self.new_named_lifetime_with_res(id, lifetime.ident, res);
- let def_id = self.local_def_id(*node_id);
- (lifetime, def_id)
- })
+ let captured_lifetimes: Vec<_> = self
+ .resolver
+ .take_extra_lifetime_params(opaque_ty_node_id)
+ .into_iter()
+ .map(|(ident, id, _)| Lifetime { id, ident })
.collect();
- debug!(?collected_lifetime_mapping);
- self.with_hir_id_owner(opaque_ty_node_id, |this| {
- // Install the remapping from old to new (if any):
- this.with_remapping(new_remapping, |this| {
- // We have to be careful to get elision right here. The
- // idea is that we create a lifetime parameter for each
- // lifetime in the return type. So, given a return type
- // like `async fn foo(..) -> &[&u32]`, we lower to `impl
- // Future<Output = &'1 [ &'2 u32 ]>`.
- //
- // Then, we will create `fn foo(..) -> Foo<'_, '_>`, and
- // hence the elision takes place at the fn site.
+ let opaque_ty_ref = self.lower_opaque_inner(
+ opaque_ty_node_id,
+ hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
+ in_trait,
+ captured_lifetimes,
+ span,
+ opaque_ty_span,
+ |this| {
let future_bound = this.lower_async_fn_output_type_to_future_bound(
output,
span,
@@ -2060,94 +1906,10 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
},
);
-
- let generic_params = this.arena.alloc_from_iter(collected_lifetimes.iter().map(
- |&(new_node_id, lifetime, _)| {
- let hir_id = this.lower_node_id(new_node_id);
- debug_assert_ne!(this.opt_local_def_id(new_node_id), None);
-
- let (name, kind) = if lifetime.ident.name == kw::UnderscoreLifetime {
- (hir::ParamName::Fresh, hir::LifetimeParamKind::Elided)
- } else {
- (
- hir::ParamName::Plain(lifetime.ident),
- hir::LifetimeParamKind::Explicit,
- )
- };
-
- hir::GenericParam {
- hir_id,
- def_id: this.local_def_id(new_node_id),
- name,
- span: lifetime.ident.span,
- pure_wrt_drop: false,
- kind: hir::GenericParamKind::Lifetime { kind },
- colon_span: None,
- source: hir::GenericParamSource::Generics,
- }
- },
- ));
- debug!("lower_async_fn_ret_ty: generic_params={:#?}", generic_params);
-
- let lifetime_mapping = if in_trait {
- self.arena.alloc_from_iter(
- collected_lifetime_mapping
- .iter()
- .map(|(lifetime, def_id)| (**lifetime, *def_id)),
- )
- } else {
- &mut []
- };
-
- let opaque_ty_item = hir::OpaqueTy {
- generics: this.arena.alloc(hir::Generics {
- params: generic_params,
- predicates: &[],
- has_where_clause_predicates: false,
- where_clause_span: this.lower_span(span),
- span: this.lower_span(span),
- }),
- bounds: arena_vec![this; future_bound],
- origin: hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
- lifetime_mapping,
- in_trait,
- };
-
- trace!("exist ty from async fn def id: {:#?}", opaque_ty_def_id);
- this.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span)
- })
- });
-
- // As documented above, we need to create the lifetime
- // arguments to our opaque type. Continuing with our example,
- // we're creating the type arguments for the return type:
- //
- // ```
- // Bar<'a, 'b, '0, '1, '_>
- // ```
- //
- // For the "input" lifetime parameters, we wish to create
- // references to the parameters themselves, including the
- // "implicit" ones created from parameter types (`'a`, `'b`,
- // '`0`, `'1`).
- //
- // For the "output" lifetime parameters, we just want to
- // generate `'_`.
- let generic_args = self.arena.alloc_from_iter(
- collected_lifetime_mapping
- .iter()
- .map(|(lifetime, _)| hir::GenericArg::Lifetime(*lifetime)),
+ arena_vec![this; future_bound]
+ },
);
- // Create the `Foo<...>` reference itself. Note that the `type
- // Foo = impl Trait` is, internally, created as a child of the
- // async fn, so the *type parameters* are inherited. It's
- // only the lifetime parameters that we must supply.
- let opaque_ty_ref = hir::TyKind::OpaqueDef(
- hir::ItemId { owner_id: hir::OwnerId { def_id: opaque_ty_def_id } },
- generic_args,
- in_trait,
- );
let opaque_ty = self.ty(opaque_ty_span, opaque_ty_ref);
hir::FnRetTy::Return(self.arena.alloc(opaque_ty))
}
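As a rough, hand-written analogue of the desugaring that `lower_async_fn_ret_ty` now delegates to `lower_opaque_inner` (ordinary stable Rust for illustration, not the compiler's actual output):

```rust
use std::future::Future;

// Surface syntax: the return type is sugar for an opaque `impl Future`.
async fn bar(x: &u32) -> u32 {
    *x
}

// Hand-written approximation: the opaque captures the lifetimes from the
// signature, here the elided lifetime of `x`.
fn bar_desugared<'a>(x: &'a u32) -> impl Future<Output = u32> + 'a {
    async move { *x }
}

fn main() {
    // Futures are inert until polled; we only construct them here.
    let v = 7u32;
    let _f1 = bar(&v);
    let _f2 = bar_desugared(&v);
}
```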
@@ -2195,7 +1957,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
) -> hir::GenericBound<'hir> {
match tpb {
GenericBound::Trait(p, modifier) => hir::GenericBound::Trait(
- self.lower_poly_trait_ref(p, itctx),
+ self.lower_poly_trait_ref(p, itctx, modifier.to_constness()),
self.lower_trait_bound_modifier(*modifier),
),
GenericBound::Outlives(lifetime) => {
@@ -2338,8 +2100,20 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
}
- fn lower_trait_ref(&mut self, p: &TraitRef, itctx: &ImplTraitContext) -> hir::TraitRef<'hir> {
- let path = match self.lower_qpath(p.ref_id, &None, &p.path, ParamMode::Explicit, itctx) {
+ fn lower_trait_ref(
+ &mut self,
+ constness: ast::Const,
+ p: &TraitRef,
+ itctx: &ImplTraitContext,
+ ) -> hir::TraitRef<'hir> {
+ let path = match self.lower_qpath(
+ p.ref_id,
+ &None,
+ &p.path,
+ ParamMode::Explicit,
+ itctx,
+ Some(constness),
+ ) {
hir::QPath::Resolved(None, path) => path,
qpath => panic!("lower_trait_ref: unexpected QPath `{qpath:?}`"),
};
@@ -2351,10 +2125,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
&mut self,
p: &PolyTraitRef,
itctx: &ImplTraitContext,
+ constness: ast::Const,
) -> hir::PolyTraitRef<'hir> {
let bound_generic_params =
self.lower_lifetime_binder(p.trait_ref.ref_id, &p.bound_generic_params);
- let trait_ref = self.lower_trait_ref(&p.trait_ref, itctx);
+ let trait_ref = self.lower_trait_ref(constness, &p.trait_ref, itctx);
hir::PolyTraitRef { bound_generic_params, trait_ref, span: self.lower_span(p.span) }
}
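The new `constness: ast::Const` parameter corresponds to bounds such as `~const Trait`. A nightly-only sketch of the surface syntax involved (relies on the unstable `const_trait_impl` feature, whose details change between nightlies; this is illustrative, not compiler code):

```rust
// Nightly-only sketch: requires a recent nightly toolchain.
#![feature(const_trait_impl)]

#[const_trait]
trait Zero {
    fn zero() -> Self;
}

impl const Zero for i32 {
    fn zero() -> Self {
        0
    }
}

// The `~const` modifier is the kind of bound whose constness is now passed
// down through `lower_poly_trait_ref` / `lower_trait_ref` / `lower_qpath`.
const fn make<T: ~const Zero>() -> T {
    T::zero()
}

const X: i32 = make::<i32>();

fn main() {
    assert_eq!(X, 0);
}
```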
@@ -2708,6 +2483,63 @@ struct GenericArgsCtor<'hir> {
}
impl<'hir> GenericArgsCtor<'hir> {
+ fn push_constness(&mut self, lcx: &mut LoweringContext<'_, 'hir>, constness: ast::Const) {
+ if !lcx.tcx.features().effects {
+ return;
+ }
+
+ // if bound is non-const, don't add host effect param
+ let ast::Const::Yes(span) = constness else { return };
+
+ let span = lcx.lower_span(span);
+
+ let id = lcx.next_node_id();
+ let hir_id = lcx.next_id();
+
+ let Some(host_param_id) = lcx.host_param_id else {
+ lcx.tcx
+ .sess
+ .delay_span_bug(span, "no host param id for call in const yet no errors reported");
+ return;
+ };
+
+ let body = lcx.lower_body(|lcx| {
+ (&[], {
+ let hir_id = lcx.next_id();
+ let res = Res::Def(DefKind::ConstParam, host_param_id.to_def_id());
+ let expr_kind = hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ lcx.arena.alloc(hir::Path {
+ span,
+ res,
+ segments: arena_vec![lcx; hir::PathSegment::new(Ident {
+ name: sym::host,
+ span,
+ }, hir_id, res)],
+ }),
+ ));
+ lcx.expr(span, expr_kind)
+ })
+ });
+
+ let attr_id = lcx.tcx.sess.parse_sess.attr_id_generator.mk_attr_id();
+ let attr = lcx.arena.alloc(Attribute {
+ kind: AttrKind::Normal(P(NormalAttr::from_ident(Ident::new(sym::rustc_host, span)))),
+ span,
+ id: attr_id,
+ style: AttrStyle::Outer,
+ });
+ lcx.attrs.insert(hir_id.local_id, std::slice::from_ref(attr));
+
+ let def_id =
+ lcx.create_def(lcx.current_hir_id_owner.def_id, id, DefPathData::AnonConst, span);
+ lcx.children.push((def_id, hir::MaybeOwner::NonOwner(hir_id)));
+ self.args.push(hir::GenericArg::Const(hir::ConstArg {
+ value: hir::AnonConst { def_id, hir_id, body },
+ span,
+ }))
+ }
+
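With the unstable `effects` feature, `push_constness` appends an extra anonymous const argument that refers to the enclosing item's `#[rustc_host] const host: bool` parameter. The snippet below is not compiler output; it is a loose, stable-Rust analogy (all names are invented for illustration) of "const-ness as an extra const generic argument threaded through calls":

```rust
trait Zero {
    fn zero() -> Self;
}

impl Zero for i32 {
    fn zero() -> Self {
        0
    }
}

// Analogy only: think of `HOST` as the implicit host-effect parameter.
// `HOST = true` models an ordinary runtime ("host") call, `HOST = false`
// models a call from a const context.
fn make<T: Zero, const HOST: bool>() -> T {
    T::zero()
}

fn main() {
    let x: i32 = make::<i32, true>();
    assert_eq!(x, 0);
}
```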
fn is_empty(&self) -> bool {
self.args.is_empty()
&& self.bindings.is_empty()
diff --git a/compiler/rustc_ast_lowering/src/lifetime_collector.rs b/compiler/rustc_ast_lowering/src/lifetime_collector.rs
index 3989fc486..6f75419c3 100644
--- a/compiler/rustc_ast_lowering/src/lifetime_collector.rs
+++ b/compiler/rustc_ast_lowering/src/lifetime_collector.rs
@@ -1,7 +1,7 @@
use super::ResolverAstLoweringExt;
use rustc_ast::visit::{self, BoundKind, LifetimeCtxt, Visitor};
-use rustc_ast::{FnRetTy, GenericBounds, Lifetime, NodeId, PathSegment, PolyTraitRef, Ty, TyKind};
-use rustc_hir::def::LifetimeRes;
+use rustc_ast::{GenericBounds, Lifetime, NodeId, PathSegment, PolyTraitRef, Ty, TyKind};
+use rustc_hir::def::{DefKind, LifetimeRes, Res};
use rustc_middle::span_bug;
use rustc_middle::ty::ResolverAstLowering;
use rustc_span::symbol::{kw, Ident};
@@ -77,7 +77,20 @@ impl<'ast> Visitor<'ast> for LifetimeCollectVisitor<'ast> {
}
fn visit_ty(&mut self, t: &'ast Ty) {
- match t.kind {
+ match &t.kind {
+ TyKind::Path(None, _) => {
+ // We can sometimes encounter bare trait objects
+ // which are represented in AST as paths.
+ if let Some(partial_res) = self.resolver.get_partial_res(t.id)
+ && let Some(Res::Def(DefKind::Trait | DefKind::TraitAlias, _)) = partial_res.full_res()
+ {
+ self.current_binders.push(t.id);
+ visit::walk_ty(self, t);
+ self.current_binders.pop();
+ } else {
+ visit::walk_ty(self, t);
+ }
+ }
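The new `TyKind::Path(None, _)` arm exists because a bare trait object is still just a path in the AST. A small example of such a type (edition 2015/2018: compiles with a `bare_trait_objects` warning; a hard error in edition 2021):

```rust
trait Greet {
    fn hi(&self) -> String;
}

// `Greet` here is written as a plain path, yet it denotes a trait object, so
// the lifetime collector must treat it as a binder, just like `dyn Greet`.
fn greet_all(items: &[Box<Greet>]) -> Vec<String> {
    items.iter().map(|g| g.hi()).collect()
}

fn main() {
    struct En;
    impl Greet for En {
        fn hi(&self) -> String {
            "hi".to_owned()
        }
    }
    let items: Vec<Box<Greet>> = vec![Box::new(En)];
    println!("{:?}", greet_all(&items));
}
```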
TyKind::BareFn(_) => {
self.current_binders.push(t.id);
visit::walk_ty(self, t);
@@ -94,12 +107,6 @@ impl<'ast> Visitor<'ast> for LifetimeCollectVisitor<'ast> {
}
}
-pub fn lifetimes_in_ret_ty(resolver: &ResolverAstLowering, ret_ty: &FnRetTy) -> Vec<Lifetime> {
- let mut visitor = LifetimeCollectVisitor::new(resolver);
- visitor.visit_fn_ret_ty(ret_ty);
- visitor.collected_lifetimes
-}
-
pub fn lifetimes_in_bounds(
resolver: &ResolverAstLowering,
bounds: &GenericBounds,
diff --git a/compiler/rustc_ast_lowering/src/pat.rs b/compiler/rustc_ast_lowering/src/pat.rs
index 2509b7056..a30f264bc 100644
--- a/compiler/rustc_ast_lowering/src/pat.rs
+++ b/compiler/rustc_ast_lowering/src/pat.rs
@@ -38,6 +38,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
path,
ParamMode::Optional,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
);
let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple struct");
break hir::PatKind::TupleStruct(qpath, pats, ddpos);
@@ -54,6 +55,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
path,
ParamMode::Optional,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
);
break hir::PatKind::Path(qpath);
}
@@ -64,6 +66,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
path,
ParamMode::Optional,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
);
let fs = self.arena.alloc_from_iter(fields.iter().map(|f| {
diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs
index 441282c05..899f92a99 100644
--- a/compiler/rustc_ast_lowering/src/path.rs
+++ b/compiler/rustc_ast_lowering/src/path.rs
@@ -23,6 +23,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
p: &Path,
param_mode: ParamMode,
itctx: &ImplTraitContext,
+ // constness of the impl/bound if this is a trait path
+ constness: Option<ast::Const>,
) -> hir::QPath<'hir> {
let qself_position = qself.as_ref().map(|q| q.position);
let qself = qself.as_ref().map(|q| self.lower_ty(&q.ty, itctx));
@@ -73,6 +75,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
param_mode,
parenthesized_generic_args,
itctx,
+ // if this is the last segment, add constness to the trait path
+ if i == proj_start - 1 { constness } else { None },
)
},
)),
@@ -119,6 +123,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
param_mode,
ParenthesizedGenericArgs::Err,
itctx,
+ None,
));
let qpath = hir::QPath::TypeRelative(ty, hir_segment);
@@ -159,6 +164,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
param_mode,
ParenthesizedGenericArgs::Err,
&ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ None,
)
})),
span: self.lower_span(p.span),
@@ -172,8 +178,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
param_mode: ParamMode,
parenthesized_generic_args: ParenthesizedGenericArgs,
itctx: &ImplTraitContext,
+ constness: Option<ast::Const>,
) -> hir::PathSegment<'hir> {
- debug!("path_span: {:?}, lower_path_segment(segment: {:?})", path_span, segment,);
+ debug!("path_span: {:?}, lower_path_segment(segment: {:?})", path_span, segment);
let (mut generic_args, infer_args) = if let Some(generic_args) = segment.args.as_deref() {
match generic_args {
GenericArgs::AngleBracketed(data) => {
@@ -231,6 +238,10 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
)
};
+ if let Some(constness) = constness {
+ generic_args.push_constness(self, constness);
+ }
+
let has_lifetimes =
generic_args.args.iter().any(|arg| matches!(arg, GenericArg::Lifetime(_)));
diff --git a/compiler/rustc_ast_passes/messages.ftl b/compiler/rustc_ast_passes/messages.ftl
index 2f0ac0c2b..f323bb4c2 100644
--- a/compiler/rustc_ast_passes/messages.ftl
+++ b/compiler/rustc_ast_passes/messages.ftl
@@ -239,5 +239,10 @@ ast_passes_visibility_not_permitted =
.individual_impl_items = place qualifiers on individual impl items instead
.individual_foreign_items = place qualifiers on individual foreign items instead
-ast_passes_where_after_type_alias = where clauses are not allowed after the type for type aliases
+ast_passes_where_clause_after_type_alias = where clauses are not allowed after the type for type aliases
+ .note = see issue #112792 <https://github.com/rust-lang/rust/issues/112792> for more information
+ .help = add `#![feature(lazy_type_alias)]` to the crate attributes to enable
+
+ast_passes_where_clause_before_type_alias = where clauses are not allowed before the type for type aliases
.note = see issue #89122 <https://github.com/rust-lang/rust/issues/89122> for more information
+ .suggestion = move it to the end of the type declaration
diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs
index 096cea945..bd3e676da 100644
--- a/compiler/rustc_ast_passes/src/ast_validation.rs
+++ b/compiler/rustc_ast_passes/src/ast_validation.rs
@@ -13,6 +13,7 @@ use rustc_ast::*;
use rustc_ast::{walk_list, StaticItem};
use rustc_ast_pretty::pprust::{self, State};
use rustc_data_structures::fx::FxIndexMap;
+use rustc_feature::Features;
use rustc_macros::Subdiagnostic;
use rustc_parse::validate_attr;
use rustc_session::lint::builtin::{
@@ -45,6 +46,7 @@ enum DisallowTildeConstContext<'a> {
struct AstValidator<'a> {
session: &'a Session,
+ features: &'a Features,
/// The span of the `extern` in an `extern { ... }` block, if any.
extern_mod: Option<&'a Item>,
@@ -136,40 +138,42 @@ impl<'a> AstValidator<'a> {
}
}
- fn check_gat_where(
+ fn check_type_alias_where_clause_location(
&mut self,
- id: NodeId,
- before_predicates: &[WherePredicate],
- where_clauses: (ast::TyAliasWhereClause, ast::TyAliasWhereClause),
- ) {
- if !before_predicates.is_empty() {
- let mut state = State::new();
- if !where_clauses.1.0 {
- state.space();
- state.word_space("where");
- } else {
+ ty_alias: &TyAlias,
+ ) -> Result<(), errors::WhereClauseBeforeTypeAlias> {
+ let before_predicates =
+ ty_alias.generics.where_clause.predicates.split_at(ty_alias.where_predicates_split).0;
+
+ if ty_alias.ty.is_none() || before_predicates.is_empty() {
+ return Ok(());
+ }
+
+ let mut state = State::new();
+ if !ty_alias.where_clauses.1.0 {
+ state.space();
+ state.word_space("where");
+ } else {
+ state.word_space(",");
+ }
+ let mut first = true;
+ for p in before_predicates {
+ if !first {
state.word_space(",");
}
- let mut first = true;
- for p in before_predicates.iter() {
- if !first {
- state.word_space(",");
- }
- first = false;
- state.print_where_predicate(p);
- }
- let suggestion = state.s.eof();
- self.lint_buffer.buffer_lint_with_diagnostic(
- DEPRECATED_WHERE_CLAUSE_LOCATION,
- id,
- where_clauses.0.1,
- fluent::ast_passes_deprecated_where_clause_location,
- BuiltinLintDiagnostics::DeprecatedWhereclauseLocation(
- where_clauses.1.1.shrink_to_hi(),
- suggestion,
- ),
- );
+ first = false;
+ state.print_where_predicate(p);
}
+
+ let span = ty_alias.where_clauses.0.1;
+ Err(errors::WhereClauseBeforeTypeAlias {
+ span,
+ sugg: errors::WhereClauseBeforeTypeAliasSugg {
+ left: span,
+ snippet: state.s.eof(),
+ right: ty_alias.where_clauses.1.1.shrink_to_hi(),
+ },
+ })
}
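For context, the two placements this helper distinguishes, shown on a stable-Rust associated type (the case routed through the deprecation lint further below); free type aliases instead get the new `WhereClauseBeforeTypeAlias` / `WhereClauseAfterTypeAlias` errors depending on `lazy_type_alias`:

```rust
trait Collection {
    type Iter<'a>
    where
        Self: 'a;
}

struct Wrapper(Vec<u8>);

impl Collection for Wrapper {
    // Deprecated placement: the where clause comes before the `=`.
    // The lint's suggestion is to move it after the aliased type:
    //     type Iter<'a> = std::slice::Iter<'a, u8> where Self: 'a;
    type Iter<'a> where Self: 'a = std::slice::Iter<'a, u8>;
}

fn main() {
    let w = Wrapper(vec![1, 2, 3]);
    let _iter: <Wrapper as Collection>::Iter<'_> = w.0.iter();
}
```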
fn with_impl_trait(&mut self, outer: Option<Span>, f: impl FnOnce(&mut Self)) {
@@ -659,7 +663,7 @@ fn validate_generic_param_order(
GenericParamKind::Type { .. } => (ParamKindOrd::TypeOrConst, ident.to_string()),
GenericParamKind::Const { ty, .. } => {
let ty = pprust::ty_to_string(ty);
- (ParamKindOrd::TypeOrConst, format!("const {}: {}", ident, ty))
+ (ParamKindOrd::TypeOrConst, format!("const {ident}: {ty}"))
}
};
param_idents.push((kind, ord_kind, bounds, idx, ident));
@@ -1009,7 +1013,9 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
replace_span: self.ending_semi_or_hi(item.span),
});
}
- ItemKind::TyAlias(box TyAlias { defaultness, where_clauses, bounds, ty, .. }) => {
+ ItemKind::TyAlias(
+ ty_alias @ box TyAlias { defaultness, bounds, where_clauses, ty, .. },
+ ) => {
self.check_defaultness(item.span, *defaultness);
if ty.is_none() {
self.session.emit_err(errors::TyAliasWithoutBody {
@@ -1018,9 +1024,16 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
});
}
self.check_type_no_bounds(bounds, "this context");
- if where_clauses.1.0 {
- self.err_handler()
- .emit_err(errors::WhereAfterTypeAlias { span: where_clauses.1.1 });
+
+ if self.features.lazy_type_alias {
+ if let Err(err) = self.check_type_alias_where_clause_location(ty_alias) {
+ self.err_handler().emit_err(err);
+ }
+ } else if where_clauses.1.0 {
+ self.err_handler().emit_err(errors::WhereClauseAfterTypeAlias {
+ span: where_clauses.1.1,
+ help: self.session.is_nightly_build().then_some(()),
+ });
}
}
_ => {}
@@ -1300,14 +1313,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
});
}
}
- AssocItemKind::Type(box TyAlias {
- generics,
- where_clauses,
- where_predicates_split,
- bounds,
- ty,
- ..
- }) => {
+ AssocItemKind::Type(box TyAlias { bounds, ty, .. }) => {
if ty.is_none() {
self.session.emit_err(errors::AssocTypeWithoutBody {
span: item.span,
@@ -1315,18 +1321,26 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
});
}
self.check_type_no_bounds(bounds, "`impl`s");
- if ty.is_some() {
- self.check_gat_where(
- item.id,
- generics.where_clause.predicates.split_at(*where_predicates_split).0,
- *where_clauses,
- );
- }
}
_ => {}
}
}
+ if let AssocItemKind::Type(ty_alias) = &item.kind
+ && let Err(err) = self.check_type_alias_where_clause_location(ty_alias)
+ {
+ self.lint_buffer.buffer_lint_with_diagnostic(
+ DEPRECATED_WHERE_CLAUSE_LOCATION,
+ item.id,
+ err.span,
+ fluent::ast_passes_deprecated_where_clause_location,
+ BuiltinLintDiagnostics::DeprecatedWhereclauseLocation(
+ err.sugg.right,
+ err.sugg.snippet,
+ ),
+ );
+ }
+
if ctxt == AssocCtxt::Trait || self.in_trait_impl {
self.visibility_not_permitted(&item.vis, errors::VisibilityNotPermittedNote::TraitImpl);
if let AssocItemKind::Fn(box Fn { sig, .. }) = &item.kind {
@@ -1462,15 +1476,12 @@ fn deny_equality_constraints(
let Some(arg) = args.args.last() else {
continue;
};
- (
- format!(", {} = {}", assoc, ty),
- arg.span().shrink_to_hi(),
- )
+ (format!(", {assoc} = {ty}"), arg.span().shrink_to_hi())
}
_ => continue,
},
None => (
- format!("<{} = {}>", assoc, ty),
+ format!("<{assoc} = {ty}>"),
trait_segment.span().shrink_to_hi(),
),
};
@@ -1491,9 +1502,15 @@ fn deny_equality_constraints(
this.err_handler().emit_err(err);
}
-pub fn check_crate(session: &Session, krate: &Crate, lints: &mut LintBuffer) -> bool {
+pub fn check_crate(
+ session: &Session,
+ features: &Features,
+ krate: &Crate,
+ lints: &mut LintBuffer,
+) -> bool {
let mut validator = AstValidator {
session,
+ features,
extern_mod: None,
in_trait_impl: false,
in_const_trait_impl: false,
diff --git a/compiler/rustc_ast_passes/src/errors.rs b/compiler/rustc_ast_passes/src/errors.rs
index ab8015c4a..a6f217d47 100644
--- a/compiler/rustc_ast_passes/src/errors.rs
+++ b/compiler/rustc_ast_passes/src/errors.rs
@@ -496,11 +496,37 @@ pub struct FieldlessUnion {
}
#[derive(Diagnostic)]
-#[diag(ast_passes_where_after_type_alias)]
+#[diag(ast_passes_where_clause_after_type_alias)]
#[note]
-pub struct WhereAfterTypeAlias {
+pub struct WhereClauseAfterTypeAlias {
#[primary_span]
pub span: Span,
+ #[help]
+ pub help: Option<()>,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_where_clause_before_type_alias)]
+#[note]
+pub struct WhereClauseBeforeTypeAlias {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sugg: WhereClauseBeforeTypeAliasSugg,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(
+ ast_passes_suggestion,
+ applicability = "machine-applicable",
+ style = "verbose"
+)]
+pub struct WhereClauseBeforeTypeAliasSugg {
+ #[suggestion_part(code = "")]
+ pub left: Span,
+ pub snippet: String,
+ #[suggestion_part(code = "{snippet}")]
+ pub right: Span,
}
#[derive(Diagnostic)]
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index b0dbc2c23..10c9c3ef1 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -218,6 +218,19 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
}
}
}
+ if !attr.is_doc_comment()
+ && attr.get_normal_item().path.segments.len() == 2
+ && attr.get_normal_item().path.segments[0].ident.name == sym::diagnostic
+ && !self.features.diagnostic_namespace
+ {
+ let msg = "`#[diagnostic]` attribute name space is experimental";
+ gate_feature_post!(
+ self,
+ diagnostic_namespace,
+ attr.get_normal_item().path.segments[0].ident.span,
+ msg
+ );
+ }
// Emit errors for non-staged-api crates.
if !self.features.staged_api {
@@ -501,10 +514,10 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
}
}
-pub fn check_crate(krate: &ast::Crate, sess: &Session) {
- maybe_stage_features(sess, krate);
- check_incompatible_features(sess);
- let mut visitor = PostExpansionVisitor { sess, features: &sess.features_untracked() };
+pub fn check_crate(krate: &ast::Crate, sess: &Session, features: &Features) {
+ maybe_stage_features(sess, features, krate);
+ check_incompatible_features(sess, features);
+ let mut visitor = PostExpansionVisitor { sess, features };
let spans = sess.parse_sess.gated_spans.spans.borrow();
macro_rules! gate_all {
@@ -556,6 +569,7 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session) {
gate_all!(const_closures, "const closures are experimental");
gate_all!(builtin_syntax, "`builtin #` syntax is unstable");
gate_all!(explicit_tail_calls, "`become` expression is experimental");
+ gate_all!(generic_const_items, "generic const items are experimental");
if !visitor.features.negative_bounds {
for &span in spans.get(&sym::negative_bounds).iter().copied().flatten() {
@@ -586,12 +600,12 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session) {
visit::walk_crate(&mut visitor, krate);
}
-fn maybe_stage_features(sess: &Session, krate: &ast::Crate) {
+fn maybe_stage_features(sess: &Session, features: &Features, krate: &ast::Crate) {
// checks if `#![feature]` has been used to enable any lang feature
// does not check the same for lib features unless there's at least one
// declared lang feature
if !sess.opts.unstable_features.is_nightly_build() {
- let lang_features = &sess.features_untracked().declared_lang_features;
+ let lang_features = &features.declared_lang_features;
if lang_features.len() == 0 {
return;
}
@@ -626,9 +640,7 @@ fn maybe_stage_features(sess: &Session, krate: &ast::Crate) {
}
}
-fn check_incompatible_features(sess: &Session) {
- let features = sess.features_untracked();
-
+fn check_incompatible_features(sess: &Session, features: &Features) {
let declared_features = features
.declared_lang_features
.iter()
diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs
index 59239b49e..58ce73047 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state.rs
@@ -150,6 +150,8 @@ pub fn print_crate<'a>(
/// and also addresses some specific regressions described in #63896 and #73345.
fn tt_prepend_space(tt: &TokenTree, prev: &TokenTree) -> bool {
if let TokenTree::Token(token, _) = prev {
+ // No space after these tokens, e.g. `x.y`, `$e`
+ // (The carets point to `prev`.) ^ ^
if matches!(token.kind, token::Dot | token::Dollar) {
return false;
}
@@ -158,10 +160,19 @@ fn tt_prepend_space(tt: &TokenTree, prev: &TokenTree) -> bool {
}
}
match tt {
+ // No space before these tokens, e.g. `foo,`, `println!`, `x.y`
+ // (The carets point to `token`.) ^ ^ ^
+ //
+ // FIXME: having `Not` here works well for macro invocations like
+ // `println!()`, but is bad when `!` means "logical not" or "the never
+ // type", where the lack of space causes ugliness like this:
+ // `Fn() ->!`, `x =! y`, `if! x { f(); }`.
TokenTree::Token(token, _) => !matches!(token.kind, token::Comma | token::Not | token::Dot),
+ // No space before parentheses if preceded by these tokens, e.g. `foo(...)`
TokenTree::Delimited(_, Delimiter::Parenthesis, _) => {
!matches!(prev, TokenTree::Token(Token { kind: token::Ident(..), .. }, _))
}
+ // No space before brackets if preceded by these tokens, e.g. `#[...]`
TokenTree::Delimited(_, Delimiter::Bracket, _) => {
!matches!(prev, TokenTree::Token(Token { kind: token::Pound, .. }, _))
}
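These spacing rules apply whenever a token stream is re-printed as text. One way to observe them from ordinary code is `stringify!`, which (as far as I know) re-prints its token stream through this pretty-printer; the expected outputs below follow from the rules above and are shown as comments rather than asserted:

```rust
fn main() {
    // No space before `,` or `.`, and none after `.`:
    println!("{}", stringify!(a . b , c)); // expected: a.b, c

    // No space between an identifier and `(`, nor between `#` and `[`:
    println!("{}", stringify!(# [ derive ( Debug ) ])); // expected: #[derive(Debug)]
}
```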
@@ -476,7 +487,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
Some(MacHeader::Path(&item.path)),
false,
None,
- delim.to_token(),
+ *delim,
tokens,
true,
span,
@@ -640,7 +651,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
Some(MacHeader::Keyword(kw)),
has_bang,
Some(*ident),
- macro_def.body.delim.to_token(),
+ macro_def.body.delim,
&macro_def.body.tokens.clone(),
true,
sp,
@@ -1240,7 +1251,7 @@ impl<'a> State<'a> {
Some(MacHeader::Path(&m.path)),
true,
None,
- m.args.delim.to_token(),
+ m.args.delim,
&m.args.tokens.clone(),
true,
m.span(),
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
index 609920180..39741a039 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
@@ -477,7 +477,7 @@ impl<'a> State<'a> {
self.word(".");
self.print_ident(*ident);
}
- ast::ExprKind::Index(expr, index) => {
+ ast::ExprKind::Index(expr, index, _) => {
self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX);
self.word("[");
self.print_expr(index);
@@ -697,15 +697,15 @@ pub fn reconstruct_format_args_template_string(pieces: &[FormatArgsPiece]) -> St
write!(template, "{n}").unwrap();
if p.format_options != Default::default() || p.format_trait != FormatTrait::Display
{
- template.push_str(":");
+ template.push(':');
}
if let Some(fill) = p.format_options.fill {
template.push(fill);
}
match p.format_options.alignment {
- Some(FormatAlignment::Left) => template.push_str("<"),
- Some(FormatAlignment::Right) => template.push_str(">"),
- Some(FormatAlignment::Center) => template.push_str("^"),
+ Some(FormatAlignment::Left) => template.push('<'),
+ Some(FormatAlignment::Right) => template.push('>'),
+ Some(FormatAlignment::Center) => template.push('^'),
None => {}
}
match p.format_options.sign {
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/item.rs b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
index 5c01b7ea7..d27a44f12 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state/item.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
@@ -30,10 +30,15 @@ impl<'a> State<'a> {
ast::ForeignItemKind::Fn(box ast::Fn { defaultness, sig, generics, body }) => {
self.print_fn_full(sig, ident, generics, vis, *defaultness, body.as_deref(), attrs);
}
- ast::ForeignItemKind::Static(ty, mutbl, body) => {
- let def = ast::Defaultness::Final;
- self.print_item_const(ident, Some(*mutbl), ty, body.as_deref(), vis, def);
- }
+ ast::ForeignItemKind::Static(ty, mutbl, body) => self.print_item_const(
+ ident,
+ Some(*mutbl),
+ &ast::Generics::default(),
+ ty,
+ body.as_deref(),
+ vis,
+ ast::Defaultness::Final,
+ ),
ast::ForeignItemKind::TyAlias(box ast::TyAlias {
defaultness,
generics,
@@ -67,6 +72,7 @@ impl<'a> State<'a> {
&mut self,
ident: Ident,
mutbl: Option<ast::Mutability>,
+ generics: &ast::Generics,
ty: &ast::Ty,
body: Option<&ast::Expr>,
vis: &ast::Visibility,
@@ -82,6 +88,7 @@ impl<'a> State<'a> {
};
self.word_space(leading);
self.print_ident(ident);
+ self.print_generic_params(&generics.params);
self.word_space(":");
self.print_type(ty);
if body.is_some() {
@@ -92,6 +99,7 @@ impl<'a> State<'a> {
self.word_space("=");
self.print_expr(body);
}
+ self.print_where_clause(&generics.where_clause);
self.word(";");
self.end(); // end the outer cbox
}
@@ -158,20 +166,21 @@ impl<'a> State<'a> {
self.word(";");
}
ast::ItemKind::Static(box StaticItem { ty, mutability: mutbl, expr: body }) => {
- let def = ast::Defaultness::Final;
self.print_item_const(
item.ident,
Some(*mutbl),
+ &ast::Generics::default(),
ty,
body.as_deref(),
&item.vis,
- def,
+ ast::Defaultness::Final,
);
}
- ast::ItemKind::Const(box ast::ConstItem { defaultness, ty, expr }) => {
+ ast::ItemKind::Const(box ast::ConstItem { defaultness, generics, ty, expr }) => {
self.print_item_const(
item.ident,
None,
+ generics,
ty,
expr.as_deref(),
&item.vis,
@@ -515,8 +524,16 @@ impl<'a> State<'a> {
ast::AssocItemKind::Fn(box ast::Fn { defaultness, sig, generics, body }) => {
self.print_fn_full(sig, ident, generics, vis, *defaultness, body.as_deref(), attrs);
}
- ast::AssocItemKind::Const(box ast::ConstItem { defaultness, ty, expr }) => {
- self.print_item_const(ident, None, ty, expr.as_deref(), vis, *defaultness);
+ ast::AssocItemKind::Const(box ast::ConstItem { defaultness, generics, ty, expr }) => {
+ self.print_item_const(
+ ident,
+ None,
+ generics,
+ ty,
+ expr.as_deref(),
+ vis,
+ *defaultness,
+ );
}
ast::AssocItemKind::Type(box ast::TyAlias {
defaultness,
diff --git a/compiler/rustc_attr/src/builtin.rs b/compiler/rustc_attr/src/builtin.rs
index 372a58857..3592287b9 100644
--- a/compiler/rustc_attr/src/builtin.rs
+++ b/compiler/rustc_attr/src/builtin.rs
@@ -28,7 +28,7 @@ pub fn rust_version_symbol() -> Symbol {
}
pub fn is_builtin_attr(attr: &Attribute) -> bool {
- attr.is_doc_comment() || attr.ident().filter(|ident| is_builtin_attr_name(ident.name)).is_some()
+ attr.is_doc_comment() || attr.ident().is_some_and(|ident| is_builtin_attr_name(ident.name))
}
enum AttrError {
@@ -800,18 +800,15 @@ pub struct Deprecation {
}
/// Finds the deprecation attribute. `None` if none exists.
-pub fn find_deprecation(sess: &Session, attrs: &[Attribute]) -> Option<(Deprecation, Span)> {
- find_deprecation_generic(sess, attrs.iter())
-}
-
-fn find_deprecation_generic<'a, I>(sess: &Session, attrs_iter: I) -> Option<(Deprecation, Span)>
-where
- I: Iterator<Item = &'a Attribute>,
-{
+pub fn find_deprecation(
+ sess: &Session,
+ features: &Features,
+ attrs: &[Attribute],
+) -> Option<(Deprecation, Span)> {
let mut depr: Option<(Deprecation, Span)> = None;
- let is_rustc = sess.features_untracked().staged_api;
+ let is_rustc = features.staged_api;
- 'outer: for attr in attrs_iter {
+ 'outer: for attr in attrs {
if !attr.has_name(sym::deprecated) {
continue;
}
@@ -872,7 +869,7 @@ where
}
}
sym::suggestion => {
- if !sess.features_untracked().deprecated_suggestion {
+ if !features.deprecated_suggestion {
sess.emit_err(session_diagnostics::DeprecatedItemSuggestion {
span: mi.span,
is_nightly: sess.is_nightly_build().then_some(()),
@@ -890,7 +887,7 @@ where
meta.span(),
AttrError::UnknownMetaItem(
pprust::path_to_string(&mi.path),
- if sess.features_untracked().deprecated_suggestion {
+ if features.deprecated_suggestion {
&["since", "note", "suggestion"]
} else {
&["since", "note"]
@@ -1217,3 +1214,20 @@ pub fn parse_alignment(node: &ast::LitKind) -> Result<u32, &'static str> {
Err("not an unsuffixed integer")
}
}
+
+/// Read the content of a `rustc_confusables` attribute, and return the list of candidate names.
+pub fn parse_confusables(attr: &Attribute) -> Option<Vec<Symbol>> {
+ let meta = attr.meta()?;
+ let MetaItem { kind: MetaItemKind::List(ref metas), .. } = meta else { return None };
+
+ let mut candidates = Vec::new();
+
+ for meta in metas {
+ let NestedMetaItem::Lit(meta_lit) = meta else {
+ return None;
+ };
+ candidates.push(meta_lit.symbol);
+ }
+
+ return Some(candidates);
+}
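The attribute this parses looks like the following. It is nightly-only and intended for standard-library diagnostics (requires `#![feature(rustc_attrs)]`), so treat this as an illustrative sketch rather than something to ship:

```rust
#![feature(rustc_attrs)]

struct Stack(Vec<i32>);

impl Stack {
    // `parse_confusables` reads the literal list `["push", "append"]`, so
    // method-not-found diagnostics for those names can point users at `add`.
    #[rustc_confusables("push", "append")]
    fn add(&mut self, x: i32) {
        self.0.push(x);
    }
}

fn main() {
    let mut s = Stack(Vec::new());
    s.add(1);
    assert_eq!(s.0, vec![1]);
}
```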
diff --git a/compiler/rustc_borrowck/src/borrowck_errors.rs b/compiler/rustc_borrowck/src/borrowck_errors.rs
index a4e0e773a..a2c7e767b 100644
--- a/compiler/rustc_borrowck/src/borrowck_errors.rs
+++ b/compiler/rustc_borrowck/src/borrowck_errors.rs
@@ -37,8 +37,8 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
desc,
);
- err.span_label(borrow_span, format!("{} is borrowed here", borrow_desc));
- err.span_label(span, format!("use of borrowed {}", borrow_desc));
+ err.span_label(borrow_span, format!("{borrow_desc} is borrowed here"));
+ err.span_label(span, format!("use of borrowed {borrow_desc}"));
err
}
@@ -51,8 +51,7 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
old_opt_via: &str,
old_load_end_span: Option<Span>,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let via =
- |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {})", msg) };
+ let via = |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {msg})") };
let mut err = struct_span_err!(
self,
new_loan_span,
@@ -143,9 +142,9 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
);
err.span_label(
new_loan_span,
- format!("{} construction occurs here{}", container_name, opt_via),
+ format!("{container_name} construction occurs here{opt_via}"),
);
- err.span_label(old_loan_span, format!("borrow occurs here{}", old_opt_via));
+ err.span_label(old_loan_span, format!("borrow occurs here{old_opt_via}"));
if let Some(previous_end_span) = previous_end_span {
err.span_label(previous_end_span, "borrow ends here");
}
@@ -173,13 +172,10 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
opt_via,
kind_new,
);
- err.span_label(
- new_loan_span,
- format!("{}borrow occurs here{}", second_borrow_desc, opt_via),
- );
+ err.span_label(new_loan_span, format!("{second_borrow_desc}borrow occurs here{opt_via}"));
err.span_label(
old_loan_span,
- format!("{} construction occurs here{}", container_name, old_opt_via),
+ format!("{container_name} construction occurs here{old_opt_via}"),
);
if let Some(previous_end_span) = previous_end_span {
err.span_label(previous_end_span, "borrow from closure ends here");
@@ -199,8 +195,7 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
msg_old: &str,
old_load_end_span: Option<Span>,
) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
- let via =
- |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {})", msg) };
+ let via = |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {msg})") };
let mut err = struct_span_err!(
self,
span,
@@ -216,22 +211,21 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
if msg_new == "" {
// If `msg_new` is empty, then this isn't a borrow of a union field.
- err.span_label(span, format!("{} borrow occurs here", kind_new));
- err.span_label(old_span, format!("{} borrow occurs here", kind_old));
+ err.span_label(span, format!("{kind_new} borrow occurs here"));
+ err.span_label(old_span, format!("{kind_old} borrow occurs here"));
} else {
// If `msg_new` isn't empty, then this is a borrow of a union field.
err.span_label(
span,
format!(
- "{} borrow of {} -- which overlaps with {} -- occurs here",
- kind_new, msg_new, msg_old,
+ "{kind_new} borrow of {msg_new} -- which overlaps with {msg_old} -- occurs here",
),
);
err.span_label(old_span, format!("{} borrow occurs here{}", kind_old, via(msg_old)));
}
if let Some(old_load_end_span) = old_load_end_span {
- err.span_label(old_load_end_span, format!("{} borrow ends here", kind_old));
+ err.span_label(old_load_end_span, format!("{kind_old} borrow ends here"));
}
err
}
@@ -250,8 +244,8 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
desc,
);
- err.span_label(borrow_span, format!("{} is borrowed here", desc));
- err.span_label(span, format!("{} is assigned to here but it was already borrowed", desc));
+ err.span_label(borrow_span, format!("{desc} is borrowed here"));
+ err.span_label(span, format!("{desc} is assigned to here but it was already borrowed"));
err
}
@@ -330,7 +324,7 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
optional_adverb_for_moved: &str,
moved_path: Option<String>,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let moved_path = moved_path.map(|mp| format!(": `{}`", mp)).unwrap_or_default();
+ let moved_path = moved_path.map(|mp| format!(": `{mp}`")).unwrap_or_default();
struct_span_err!(
self,
@@ -369,8 +363,8 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
immutable_place,
immutable_section,
);
- err.span_label(mutate_span, format!("cannot {}", action));
- err.span_label(immutable_span, format!("value is immutable in {}", immutable_section));
+ err.span_label(mutate_span, format!("cannot {action}"));
+ err.span_label(immutable_span, format!("value is immutable in {immutable_section}"));
err
}
@@ -428,7 +422,7 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
err.span_label(
span,
- format!("{}s a {} data owned by the current function", return_kind, reference_desc),
+ format!("{return_kind}s a {reference_desc} data owned by the current function"),
);
err
@@ -449,8 +443,8 @@ impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
"{closure_kind} may outlive the current {scope}, but it borrows {borrowed_path}, \
which is owned by the current {scope}",
);
- err.span_label(capture_span, format!("{} is borrowed here", borrowed_path))
- .span_label(closure_span, format!("may outlive borrowed value {}", borrowed_path));
+ err.span_label(capture_span, format!("{borrowed_path} is borrowed here"))
+ .span_label(closure_span, format!("may outlive borrowed value {borrowed_path}"));
err
}
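Most edits in this file are mechanical: positional `format!` arguments are replaced by captured identifiers (stable since Rust 1.58). Both spellings produce the same string:

```rust
fn main() {
    let borrow_desc = "`x`";
    assert_eq!(
        format!("{borrow_desc} is borrowed here"),
        format!("{} is borrowed here", borrow_desc),
    );
}
```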
diff --git a/compiler/rustc_borrowck/src/constraint_generation.rs b/compiler/rustc_borrowck/src/constraint_generation.rs
index 743d117e0..1f642099f 100644
--- a/compiler/rustc_borrowck/src/constraint_generation.rs
+++ b/compiler/rustc_borrowck/src/constraint_generation.rs
@@ -7,8 +7,8 @@ use rustc_middle::mir::{
Body, Local, Location, Place, PlaceRef, ProjectionElem, Rvalue, SourceInfo, Statement,
StatementKind, Terminator, TerminatorKind, UserTypeProjection,
};
-use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, RegionVid, Ty, TyCtxt};
use crate::{
@@ -49,11 +49,11 @@ struct ConstraintGeneration<'cg, 'tcx> {
}
impl<'cg, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'tcx> {
- /// We sometimes have `substs` within an rvalue, or within a
+ /// We sometimes have `args` within an rvalue, or within a
/// call. Make them live at the location where they appear.
- fn visit_substs(&mut self, substs: &SubstsRef<'tcx>, location: Location) {
- self.add_regular_live_constraint(*substs, location);
- self.super_substs(substs);
+ fn visit_args(&mut self, args: &GenericArgsRef<'tcx>, location: Location) {
+ self.add_regular_live_constraint(*args, location);
+ self.super_args(args);
}
/// We sometimes have `region` within an rvalue, or within a
diff --git a/compiler/rustc_borrowck/src/consumers.rs b/compiler/rustc_borrowck/src/consumers.rs
index d25714537..becfa535a 100644
--- a/compiler/rustc_borrowck/src/consumers.rs
+++ b/compiler/rustc_borrowck/src/consumers.rs
@@ -30,7 +30,7 @@ pub use super::{
/// will be retrieved.
#[derive(Debug, Copy, Clone)]
pub enum ConsumerOptions {
- /// Retrieve the [`Body`] along with the [`BorrowSet`](super::borrow_set::BorrowSet)
+ /// Retrieve the [`Body`] along with the [`BorrowSet`]
/// and [`RegionInferenceContext`]. If you would like the body only, use
/// [`TyCtxt::mir_promoted`].
///
diff --git a/compiler/rustc_borrowck/src/dataflow.rs b/compiler/rustc_borrowck/src/dataflow.rs
index 1064b44d2..4ac633c26 100644
--- a/compiler/rustc_borrowck/src/dataflow.rs
+++ b/compiler/rustc_borrowck/src/dataflow.rs
@@ -2,12 +2,14 @@
#![deny(rustc::diagnostic_outside_of_impl)]
use rustc_data_structures::fx::FxIndexMap;
use rustc_index::bit_set::BitSet;
-use rustc_middle::mir::{self, BasicBlock, Body, Location, Place};
+use rustc_middle::mir::{
+ self, BasicBlock, Body, CallReturnPlaces, Location, Place, TerminatorEdges,
+};
use rustc_middle::ty::RegionVid;
use rustc_middle::ty::TyCtxt;
use rustc_mir_dataflow::impls::{EverInitializedPlaces, MaybeUninitializedPlaces};
use rustc_mir_dataflow::ResultsVisitable;
-use rustc_mir_dataflow::{self, fmt::DebugWithContext, CallReturnPlaces, GenKill};
+use rustc_mir_dataflow::{self, fmt::DebugWithContext, GenKill};
use rustc_mir_dataflow::{Analysis, Direction, Results};
use std::fmt;
@@ -334,6 +336,10 @@ impl<'tcx> rustc_mir_dataflow::AnalysisDomain<'tcx> for Borrows<'_, 'tcx> {
impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
type Idx = BorrowIndex;
+ fn domain_size(&self, _: &mir::Body<'tcx>) -> usize {
+ self.borrow_set.len()
+ }
+
fn before_statement_effect(
&mut self,
trans: &mut impl GenKill<Self::Idx>,
@@ -360,7 +366,7 @@ impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
return;
}
let index = self.borrow_set.get_index_of(&location).unwrap_or_else(|| {
- panic!("could not find BorrowIndex for location {:?}", location);
+ panic!("could not find BorrowIndex for location {location:?}");
});
trans.gen(index);
@@ -400,12 +406,12 @@ impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
self.kill_loans_out_of_scope_at_location(trans, location);
}
- fn terminator_effect(
+ fn terminator_effect<'mir>(
&mut self,
- trans: &mut impl GenKill<Self::Idx>,
- terminator: &mir::Terminator<'tcx>,
+ trans: &mut Self::Domain,
+ terminator: &'mir mir::Terminator<'tcx>,
_location: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
if let mir::TerminatorKind::InlineAsm { operands, .. } = &terminator.kind {
for op in operands {
if let mir::InlineAsmOperand::Out { place: Some(place), .. }
@@ -415,6 +421,7 @@ impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
}
}
}
+ terminator.edges()
}
fn call_return_effect(
diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
index c8c8b72b3..fe4a45b38 100644
--- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
@@ -363,21 +363,12 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
}
let hir = self.infcx.tcx.hir();
- if let Some(hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Fn(_, _, body_id),
- ..
- })) = hir.find(self.mir_hir_id())
- && let Some(hir::Node::Expr(expr)) = hir.find(body_id.hir_id)
- {
+ if let Some(body_id) = hir.maybe_body_owned_by(self.mir_def_id()) {
+ let expr = hir.body(body_id).value;
let place = &self.move_data.move_paths[mpi].place;
- let span = place.as_local()
- .map(|local| self.body.local_decls[local].source_info.span);
- let mut finder = ExpressionFinder {
- expr_span: move_span,
- expr: None,
- pat: None,
- parent_pat: None,
- };
+ let span = place.as_local().map(|local| self.body.local_decls[local].source_info.span);
+ let mut finder =
+ ExpressionFinder { expr_span: move_span, expr: None, pat: None, parent_pat: None };
finder.visit_expr(expr);
if let Some(span) = span && let Some(expr) = finder.expr {
for (_, expr) in hir.parent_iter(expr.hir_id) {
@@ -461,7 +452,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
} = move_spans {
// We already suggest cloning for these cases in `explain_captures`.
} else {
- self.suggest_cloning(err, ty, move_span);
+ self.suggest_cloning(err, ty, expr, move_span);
}
}
}
@@ -662,7 +653,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
err.span_suggestion_verbose(
sugg_span.shrink_to_hi(),
"consider assigning a value",
- format!(" = {}", assign_value),
+ format!(" = {assign_value}"),
Applicability::MaybeIncorrect,
);
}
@@ -702,11 +693,11 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
.iter()
.copied()
.find_map(find_fn_kind_from_did),
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => tcx
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => tcx
.explicit_item_bounds(def_id)
- .subst_iter_copied(tcx, substs)
+ .iter_instantiated_copied(tcx, args)
.find_map(|(clause, span)| find_fn_kind_from_did((clause, span))),
- ty::Closure(_, substs) => match substs.as_closure().kind() {
+ ty::Closure(_, args) => match args.as_closure().kind() {
ty::ClosureKind::Fn => Some(hir::Mutability::Not),
ty::ClosureKind::FnMut => Some(hir::Mutability::Mut),
_ => None,
@@ -714,7 +705,9 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
_ => None,
};
- let Some(borrow_level) = borrow_level else { return false; };
+ let Some(borrow_level) = borrow_level else {
+ return false;
+ };
let sugg = move_sites
.iter()
.map(|move_site| {
@@ -734,9 +727,21 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
true
}
- fn suggest_cloning(&self, err: &mut Diagnostic, ty: Ty<'tcx>, span: Span) {
+ fn suggest_cloning(
+ &self,
+ err: &mut Diagnostic,
+ ty: Ty<'tcx>,
+ expr: &hir::Expr<'_>,
+ span: Span,
+ ) {
let tcx = self.infcx.tcx;
// Try to find predicates on *generic params* that would allow copying `ty`
+ let suggestion =
+ if let Some(symbol) = tcx.hir().maybe_get_struct_pattern_shorthand_field(expr) {
+ format!(": {symbol}.clone()")
+ } else {
+ ".clone()".to_owned()
+ };
if let Some(clone_trait_def) = tcx.lang_items().clone_trait()
&& self.infcx
.type_implements_trait(
@@ -746,10 +751,20 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
)
.must_apply_modulo_regions()
{
+ let msg = if let ty::Adt(def, _) = ty.kind()
+ && [
+ tcx.get_diagnostic_item(sym::Arc),
+ tcx.get_diagnostic_item(sym::Rc),
+ ].contains(&Some(def.did()))
+ {
+ "clone the value to increment its reference count"
+ } else {
+ "consider cloning the value if the performance cost is acceptable"
+ };
err.span_suggestion_verbose(
span.shrink_to_hi(),
- "consider cloning the value if the performance cost is acceptable",
- ".clone()",
+ msg,
+ suggestion,
Applicability::MachineApplicable,
);
}
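The two hunks above make `suggest_cloning` aware of struct-literal shorthand fields (the inserted text becomes `: field.clone()` rather than a plain `.clone()`) and special-case the message when the type is `Rc` or `Arc`. A small user-side example of the code shapes involved; the types are made up, and the `.clone()` calls show what the suggestions would insert:

```rust
// User-side Rust illustrating the two suggestion shapes; `Name`/`Person` are
// made-up types, and the `.clone()` calls are the fixes the diagnostics insert.
use std::rc::Rc;

#[derive(Clone)]
struct Name(String);

struct Person {
    name: Name,
}

fn main() {
    let name = Name("ferris".into());
    // Shorthand field `Person { name }`: the fix is spelled `name: name.clone()`.
    let person = Person { name: name.clone() };
    println!("{} / {}", person.name.0, name.0); // the later use of `name` is now fine

    // For `Rc`/`Arc`, the message becomes "clone the value to increment its
    // reference count"; the inserted text is still `.clone()`.
    let shared = Rc::new(42);
    let handle = shared.clone();
    println!("{shared} {handle}");
}
```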
@@ -763,7 +778,9 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
.typeck_root_def_id(self.mir_def_id().to_def_id())
.as_local()
.and_then(|def_id| tcx.hir().get_generics(def_id))
- else { return; };
+ else {
+ return;
+ };
// Try to find predicates on *generic params* that would allow copying `ty`
let ocx = ObligationCtxt::new(&self.infcx);
let copy_did = tcx.require_lang_item(LangItem::Copy, Some(span));
@@ -1155,8 +1172,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
if union_type_name != "" {
err.note(format!(
- "{} is a field of the union `{}`, so it overlaps the field {}",
- msg_place, union_type_name, msg_borrow,
+ "{msg_place} is a field of the union `{union_type_name}`, so it overlaps the field {msg_borrow}",
));
}
@@ -1220,18 +1236,20 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
return;
};
let inner_param_uses = find_all_local_uses::find(self.body, inner_param.local);
- let Some((inner_call_loc, inner_call_term)) = inner_param_uses.into_iter().find_map(|loc| {
- let Either::Right(term) = self.body.stmt_at(loc) else {
- debug!("{:?} is a statement, so it can't be a call", loc);
- return None;
- };
- let TerminatorKind::Call { args, .. } = &term.kind else {
- debug!("not a call: {:?}", term);
- return None;
- };
- debug!("checking call args for uses of inner_param: {:?}", args);
- args.contains(&Operand::Move(inner_param)).then_some((loc, term))
- }) else {
+ let Some((inner_call_loc, inner_call_term)) =
+ inner_param_uses.into_iter().find_map(|loc| {
+ let Either::Right(term) = self.body.stmt_at(loc) else {
+ debug!("{:?} is a statement, so it can't be a call", loc);
+ return None;
+ };
+ let TerminatorKind::Call { args, .. } = &term.kind else {
+ debug!("not a call: {:?}", term);
+ return None;
+ };
+ debug!("checking call args for uses of inner_param: {:?}", args);
+ args.contains(&Operand::Move(inner_param)).then_some((loc, term))
+ })
+ else {
debug!("no uses of inner_param found as a by-move call arg");
return;
};
@@ -1344,8 +1362,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let Some(trait_did) = tcx.trait_of_item(def_id) &&
tcx.is_diagnostic_item(sym::Iterator, trait_did) {
err.note(format!(
- "a for loop advances the iterator for you, the result is stored in `{}`.",
- loop_bind
+ "a for loop advances the iterator for you, the result is stored in `{loop_bind}`."
));
err.help("if you want to call `next` on a iterator within the loop, consider using `while let`.");
}
@@ -1442,21 +1459,24 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
// Get closure's arguments
- let ty::Closure(_, substs) = typeck_results.expr_ty(closure_expr).kind() else { /* hir::Closure can be a generator too */ return };
- let sig = substs.as_closure().sig();
+ let ty::Closure(_, args) = typeck_results.expr_ty(closure_expr).kind() else {
+ /* hir::Closure can be a generator too */
+ return;
+ };
+ let sig = args.as_closure().sig();
let tupled_params =
tcx.erase_late_bound_regions(sig.inputs().iter().next().unwrap().map_bound(|&b| b));
let ty::Tuple(params) = tupled_params.kind() else { return };
// Find the first argument with a matching type, get its name
- let Some((_, this_name)) = params
- .iter()
- .zip(hir.body_param_names(closure.body))
- .find(|(param_ty, name)|{
+ let Some((_, this_name)) =
+ params.iter().zip(hir.body_param_names(closure.body)).find(|(param_ty, name)| {
// FIXME: also support deref for stuff like `Rc` arguments
param_ty.peel_refs() == local_ty && name != &Ident::empty()
})
- else { return };
+ else {
+ return;
+ };
let spans;
if let Some((_path_expr, qpath)) = finder.error_path
@@ -1813,7 +1833,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
},
ConstraintCategory::CallArgument(None),
var_or_use_span,
- &format!("`{}`", name),
+ &format!("`{name}`"),
"block",
),
(
@@ -1835,7 +1855,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
region_name,
category,
span,
- &format!("`{}`", name),
+ &format!("`{name}`"),
"function",
),
(
@@ -1909,14 +1929,14 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
}
- let mut err = self.path_does_not_live_long_enough(borrow_span, &format!("`{}`", name));
+ let mut err = self.path_does_not_live_long_enough(borrow_span, &format!("`{name}`"));
if let Some(annotation) = self.annotate_argument_and_return_for_borrow(borrow) {
let region_name = annotation.emit(self, &mut err);
err.span_label(
borrow_span,
- format!("`{}` would have to be valid for `{}`...", name, region_name),
+ format!("`{name}` would have to be valid for `{region_name}`..."),
);
err.span_label(
@@ -1927,7 +1947,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
self.infcx
.tcx
.opt_item_name(self.mir_def_id().to_def_id())
- .map(|name| format!("function `{}`", name))
+ .map(|name| format!("function `{name}`"))
.unwrap_or_else(|| {
match &self.infcx.tcx.def_kind(self.mir_def_id()) {
DefKind::Closure => "enclosing closure",
@@ -1962,7 +1982,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
} else {
err.span_label(borrow_span, "borrowed value does not live long enough");
- err.span_label(drop_span, format!("`{}` dropped here while still borrowed", name));
+ err.span_label(drop_span, format!("`{name}` dropped here while still borrowed"));
borrow_spans.args_subdiag(&mut err, |args_span| {
crate::session_diagnostics::CaptureArgLabel::Capture {
@@ -2006,22 +2026,17 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let mut err = self.cannot_borrow_across_destructor(borrow_span);
let what_was_dropped = match self.describe_place(place.as_ref()) {
- Some(name) => format!("`{}`", name),
+ Some(name) => format!("`{name}`"),
None => String::from("temporary value"),
};
let label = match self.describe_place(borrow.borrowed_place.as_ref()) {
Some(borrowed) => format!(
- "here, drop of {D} needs exclusive access to `{B}`, \
- because the type `{T}` implements the `Drop` trait",
- D = what_was_dropped,
- T = dropped_ty,
- B = borrowed
+ "here, drop of {what_was_dropped} needs exclusive access to `{borrowed}`, \
+ because the type `{dropped_ty}` implements the `Drop` trait"
),
None => format!(
- "here is drop of {D}; whose type `{T}` implements the `Drop` trait",
- D = what_was_dropped,
- T = dropped_ty
+ "here is drop of {what_was_dropped}; whose type `{dropped_ty}` implements the `Drop` trait"
),
};
err.span_label(drop_span, label);
@@ -2128,13 +2143,14 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
self.current -= 1;
}
fn visit_expr(&mut self, expr: &hir::Expr<'tcx>) {
- if self.span == expr.span {
+ if self.span == expr.span.source_callsite() {
self.found = self.current;
}
walk_expr(self, expr);
}
}
let source_info = self.body.source_info(location);
+ let proper_span = proper_span.source_callsite();
if let Some(scope) = self.body.source_scopes.get(source_info.scope)
&& let ClearCrossCrate::Set(scope_data) = &scope.local_data
&& let Some(node) = self.infcx.tcx.hir().find(scope_data.lint_root)
@@ -2233,10 +2249,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
} else {
"local data "
};
- (
- format!("{}`{}`", local_kind, place_desc),
- format!("`{}` is borrowed here", place_desc),
- )
+ (format!("{local_kind}`{place_desc}`"), format!("`{place_desc}` is borrowed here"))
} else {
let root_place =
self.prefixes(borrow.borrowed_place.as_ref(), PrefixSet::All).last().unwrap();
@@ -2338,9 +2351,8 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
err.span_suggestion_verbose(
sugg_span,
format!(
- "to force the {} to take ownership of {} (and any \
- other referenced variables), use the `move` keyword",
- kind, captured_var
+ "to force the {kind} to take ownership of {captured_var} (and any \
+ other referenced variables), use the `move` keyword"
),
suggestion,
Applicability::MachineApplicable,
@@ -2348,7 +2360,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
match category {
ConstraintCategory::Return(_) | ConstraintCategory::OpaqueType => {
- let msg = format!("{} is returned here", kind);
+ let msg = format!("{kind} is returned here");
err.span_note(constraint_span, msg);
}
ConstraintCategory::CallArgument(_) => {
@@ -2390,21 +2402,18 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
err.span_label(
upvar_span,
- format!("`{}` declared here, outside of the {} body", upvar_name, escapes_from),
+ format!("`{upvar_name}` declared here, outside of the {escapes_from} body"),
);
- err.span_label(borrow_span, format!("borrow is only valid in the {} body", escapes_from));
+ err.span_label(borrow_span, format!("borrow is only valid in the {escapes_from} body"));
if let Some(name) = name {
err.span_label(
escape_span,
- format!("reference to `{}` escapes the {} body here", name, escapes_from),
+ format!("reference to `{name}` escapes the {escapes_from} body here"),
);
} else {
- err.span_label(
- escape_span,
- format!("reference escapes the {} body here", escapes_from),
- );
+ err.span_label(escape_span, format!("reference escapes the {escapes_from} body here"));
}
err
@@ -2667,7 +2676,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
kind: TerminatorKind::Call { call_source: CallSource::OverloadedOperator, .. },
..
}),
- Some((method_did, method_substs)),
+ Some((method_did, method_args)),
) = (
&self.body[loan.reserve_location.block].terminator,
rustc_middle::util::find_self_call(
@@ -2680,15 +2689,12 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
if tcx.is_diagnostic_item(sym::deref_method, method_did) {
let deref_target =
tcx.get_diagnostic_item(sym::deref_target).and_then(|deref_target| {
- Instance::resolve(tcx, self.param_env, deref_target, method_substs)
+ Instance::resolve(tcx, self.param_env, deref_target, method_args)
.transpose()
});
if let Some(Ok(instance)) = deref_target {
let deref_target_ty = instance.ty(tcx, self.param_env);
- err.note(format!(
- "borrow occurs due to deref coercion to `{}`",
- deref_target_ty
- ));
+ err.note(format!("borrow occurs due to deref coercion to `{deref_target_ty}`"));
err.span_note(tcx.def_span(instance.def_id()), "deref defined here");
}
}
@@ -2744,7 +2750,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
"cannot assign twice to immutable variable"
};
if span != assigned_span && !from_arg {
- err.span_label(assigned_span, format!("first assignment to {}", place_description));
+ err.span_label(assigned_span, format!("first assignment to {place_description}"));
}
if let Some(decl) = local_decl
&& let Some(name) = local_name
@@ -2753,7 +2759,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
err.span_suggestion(
decl.source_info.span,
"consider making this binding mutable",
- format!("mut {}", name),
+ format!("mut {name}"),
Applicability::MachineApplicable,
);
}
@@ -2847,11 +2853,11 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
if is_closure {
None
} else {
- let ty = self.infcx.tcx.type_of(self.mir_def_id()).subst_identity();
+ let ty = self.infcx.tcx.type_of(self.mir_def_id()).instantiate_identity();
match ty.kind() {
ty::FnDef(_, _) | ty::FnPtr(_) => self.annotate_fn_sig(
self.mir_def_id(),
- self.infcx.tcx.fn_sig(self.mir_def_id()).subst_identity(),
+ self.infcx.tcx.fn_sig(self.mir_def_id()).instantiate_identity(),
),
_ => None,
}
@@ -2893,13 +2899,15 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
);
// Check if our `target` was captured by a closure.
if let Rvalue::Aggregate(
- box AggregateKind::Closure(def_id, substs),
+ box AggregateKind::Closure(def_id, args),
operands,
) = rvalue
{
let def_id = def_id.expect_local();
for operand in operands {
- let (Operand::Copy(assigned_from) | Operand::Move(assigned_from)) = operand else {
+ let (Operand::Copy(assigned_from) | Operand::Move(assigned_from)) =
+ operand
+ else {
continue;
};
debug!(
@@ -2908,7 +2916,9 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
);
// Find the local from the operand.
- let Some(assigned_from_local) = assigned_from.local_or_deref_local() else {
+ let Some(assigned_from_local) =
+ assigned_from.local_or_deref_local()
+ else {
continue;
};
@@ -2920,7 +2930,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
// into a place then we should annotate the closure in
// case it ends up being assigned into the return place.
annotated_closure =
- self.annotate_fn_sig(def_id, substs.as_closure().sig());
+ self.annotate_fn_sig(def_id, args.as_closure().sig());
debug!(
"annotate_argument_and_return_for_borrow: \
annotated_closure={:?} assigned_from_local={:?} \
@@ -2961,7 +2971,9 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
);
// Find the local from the rvalue.
- let Some(assigned_from_local) = assigned_from.local_or_deref_local() else { continue };
+ let Some(assigned_from_local) = assigned_from.local_or_deref_local() else {
+ continue;
+ };
debug!(
"annotate_argument_and_return_for_borrow: \
assigned_from_local={:?}",
@@ -3009,7 +3021,8 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
assigned_to, args
);
for operand in args {
- let (Operand::Copy(assigned_from) | Operand::Move(assigned_from)) = operand else {
+ let (Operand::Copy(assigned_from) | Operand::Move(assigned_from)) = operand
+ else {
continue;
};
debug!(
@@ -3207,7 +3220,7 @@ impl<'tcx> AnnotatedBorrowFnSignature<'tcx> {
return_span,
} => {
let argument_ty_name = cx.get_name_for_ty(argument_ty, 0);
- diag.span_label(argument_span, format!("has type `{}`", argument_ty_name));
+ diag.span_label(argument_span, format!("has type `{argument_ty_name}`"));
let return_ty_name = cx.get_name_for_ty(return_ty, 0);
let types_equal = return_ty_name == argument_ty_name;
@@ -3234,15 +3247,14 @@ impl<'tcx> AnnotatedBorrowFnSignature<'tcx> {
// Region of return type and arguments checked to be the same earlier.
let region_name = cx.get_region_name_for_ty(*return_ty, 0);
for (_, argument_span) in arguments {
- diag.span_label(*argument_span, format!("has lifetime `{}`", region_name));
+ diag.span_label(*argument_span, format!("has lifetime `{region_name}`"));
}
- diag.span_label(*return_span, format!("also has lifetime `{}`", region_name,));
+ diag.span_label(*return_span, format!("also has lifetime `{region_name}`",));
diag.help(format!(
- "use data from the highlighted arguments which match the `{}` lifetime of \
+ "use data from the highlighted arguments which match the `{region_name}` lifetime of \
the return type",
- region_name,
));
region_name
diff --git a/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
index 225c38efb..c66a24473 100644
--- a/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
@@ -79,7 +79,7 @@ impl<'tcx> BorrowExplanation<'tcx> {
| hir::ExprKind::Unary(hir::UnOp::Deref, inner)
| hir::ExprKind::Field(inner, _)
| hir::ExprKind::MethodCall(_, inner, _, _)
- | hir::ExprKind::Index(inner, _) = &expr.kind
+ | hir::ExprKind::Index(inner, _, _) = &expr.kind
{
expr = inner;
}
@@ -168,17 +168,17 @@ impl<'tcx> BorrowExplanation<'tcx> {
let local_decl = &body.local_decls[dropped_local];
let mut ty = local_decl.ty;
if local_decl.source_info.span.desugaring_kind() == Some(DesugaringKind::ForLoop) {
- if let ty::Adt(adt, substs) = local_decl.ty.kind() {
+ if let ty::Adt(adt, args) = local_decl.ty.kind() {
if tcx.is_diagnostic_item(sym::Option, adt.did()) {
// in for loop desugaring, only look at the `Some(..)` inner type
- ty = substs.type_at(0);
+ ty = args.type_at(0);
}
}
}
let (dtor_desc, type_desc) = match ty.kind() {
// If type is an ADT that implements Drop, then
// simplify output by reporting just the ADT name.
- ty::Adt(adt, _substs) if adt.has_dtor(tcx) && !adt.is_box() => {
+ ty::Adt(adt, _args) if adt.has_dtor(tcx) && !adt.is_box() => {
("`Drop` code", format!("type `{}`", tcx.def_path_str(adt.did())))
}
diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs
index d292611e6..099e07e88 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mod.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs
@@ -732,18 +732,18 @@ impl<'tcx> BorrowedContentSource<'tcx> {
fn from_call(func: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Option<Self> {
match *func.kind() {
- ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, args) => {
let trait_id = tcx.trait_of_item(def_id)?;
let lang_items = tcx.lang_items();
if Some(trait_id) == lang_items.deref_trait()
|| Some(trait_id) == lang_items.deref_mut_trait()
{
- Some(BorrowedContentSource::OverloadedDeref(substs.type_at(0)))
+ Some(BorrowedContentSource::OverloadedDeref(args.type_at(0)))
} else if Some(trait_id) == lang_items.index_trait()
|| Some(trait_id) == lang_items.index_mut_trait()
{
- Some(BorrowedContentSource::OverloadedIndex(substs.type_at(0)))
+ Some(BorrowedContentSource::OverloadedIndex(args.type_at(0)))
} else {
None
}
@@ -847,14 +847,12 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
kind: TerminatorKind::Call { fn_span, call_source, .. }, ..
}) = &self.body[location.block].terminator
{
- let Some((method_did, method_substs)) =
- rustc_middle::util::find_self_call(
- self.infcx.tcx,
- &self.body,
- target_temp,
- location.block,
- )
- else {
+ let Some((method_did, method_args)) = rustc_middle::util::find_self_call(
+ self.infcx.tcx,
+ &self.body,
+ target_temp,
+ location.block,
+ ) else {
return normal_ret;
};
@@ -862,7 +860,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
self.infcx.tcx,
self.param_env,
method_did,
- method_substs,
+ method_args,
*fn_span,
call_source.from_hir_call(),
Some(self.infcx.tcx.fn_arg_names(method_did)[0]),
@@ -1041,7 +1039,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
});
}
}
- CallKind::Normal { self_arg, desugaring, method_did, method_substs } => {
+ CallKind::Normal { self_arg, desugaring, method_did, method_args } => {
let self_arg = self_arg.unwrap();
let tcx = self.infcx.tcx;
if let Some((CallDesugaringKind::ForLoopIntoIter, _)) = desugaring {
@@ -1108,20 +1106,20 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
// Erase and shadow everything that could be passed to the new infcx.
let ty = moved_place.ty(self.body, tcx).ty;
- if let ty::Adt(def, substs) = ty.kind()
+ if let ty::Adt(def, args) = ty.kind()
&& Some(def.did()) == tcx.lang_items().pin_type()
- && let ty::Ref(_, _, hir::Mutability::Mut) = substs.type_at(0).kind()
+ && let ty::Ref(_, _, hir::Mutability::Mut) = args.type_at(0).kind()
&& let self_ty = self.infcx.instantiate_binder_with_fresh_vars(
fn_call_span,
LateBoundRegionConversionTime::FnCall,
- tcx.fn_sig(method_did).subst(tcx, method_substs).input(0),
+ tcx.fn_sig(method_did).instantiate(tcx, method_args).input(0),
)
&& self.infcx.can_eq(self.param_env, ty, self_ty)
{
err.eager_subdiagnostic(
&self.infcx.tcx.sess.parse_sess.span_diagnostic,
CaptureReasonSuggest::FreshReborrow {
- span: fn_call_span.shrink_to_lo(),
+ span: move_span.shrink_to_hi(),
});
}
if let Some(clone_trait) = tcx.lang_items().clone_trait()
@@ -1135,10 +1133,10 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
&& self.infcx.predicate_must_hold_modulo_regions(&o)
{
err.span_suggestion_verbose(
- fn_call_span.shrink_to_lo(),
+ move_span.shrink_to_hi(),
"you can `clone` the value and consume it, but this might not be \
your desired behavior",
- "clone().".to_string(),
+ ".clone()".to_string(),
Applicability::MaybeIncorrect,
);
}
@@ -1163,7 +1161,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let parent_self_ty =
matches!(tcx.def_kind(parent_did), rustc_hir::def::DefKind::Impl { .. })
.then_some(parent_did)
- .and_then(|did| match tcx.type_of(did).subst_identity().kind() {
+ .and_then(|did| match tcx.type_of(did).instantiate_identity().kind() {
ty::Adt(def, ..) => Some(def.did()),
_ => None,
});
diff --git a/compiler/rustc_borrowck/src/diagnostics/move_errors.rs b/compiler/rustc_borrowck/src/diagnostics/move_errors.rs
index 8b77477a3..e05c04e11 100644
--- a/compiler/rustc_borrowck/src/diagnostics/move_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/move_errors.rs
@@ -184,7 +184,9 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
// Error with the pattern
LookupResult::Exact(_) => {
- let LookupResult::Parent(Some(mpi)) = self.move_data.rev_lookup.find(move_from.as_ref()) else {
+ let LookupResult::Parent(Some(mpi)) =
+ self.move_data.rev_lookup.find(move_from.as_ref())
+ else {
// move_from should be a projection from match_place.
unreachable!("Probably not unreachable...");
};
@@ -322,10 +324,10 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
ty::Array(..) | ty::Slice(..) => {
self.cannot_move_out_of_interior_noncopy(span, ty, None)
}
- ty::Closure(def_id, closure_substs)
+ ty::Closure(def_id, closure_args)
if def_id.as_local() == Some(self.mir_def_id()) && upvar_field.is_some() =>
{
- let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ let closure_kind_ty = closure_args.as_closure().kind_ty();
let closure_kind = match closure_kind_ty.to_opt_closure_kind() {
Some(kind @ (ty::ClosureKind::Fn | ty::ClosureKind::FnMut)) => kind,
Some(ty::ClosureKind::FnOnce) => {
@@ -494,8 +496,10 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
if let LocalInfo::User(BindingForm::Var(VarBindingForm { pat_span, .. })) =
*bind_to.local_info()
{
- let Ok(pat_snippet) =
- self.infcx.tcx.sess.source_map().span_to_snippet(pat_span) else { continue; };
+ let Ok(pat_snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(pat_span)
+ else {
+ continue;
+ };
let Some(stripped) = pat_snippet.strip_prefix('&') else {
suggestions.push((
bind_to.source_info.span.shrink_to_lo(),
diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
index 1f2fefadf..d62541daf 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
@@ -2,7 +2,6 @@ use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed
use rustc_hir as hir;
use rustc_hir::intravisit::Visitor;
use rustc_hir::Node;
-use rustc_middle::hir::map::Map;
use rustc_middle::mir::{Mutability, Place, PlaceRef, ProjectionElem};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{
@@ -568,7 +567,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
};
if let hir::ExprKind::Assign(place, rv, _sp) = expr.kind
- && let hir::ExprKind::Index(val, index) = place.kind
+ && let hir::ExprKind::Index(val, index, _) = place.kind
&& (expr.span == self.assign_span || place.span == self.assign_span)
{
// val[index] = rv;
@@ -621,7 +620,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
);
self.suggested = true;
} else if let hir::ExprKind::MethodCall(_path, receiver, _, sp) = expr.kind
- && let hir::ExprKind::Index(val, index) = receiver.kind
+ && let hir::ExprKind::Index(val, index, _) = receiver.kind
&& expr.span == self.assign_span
{
// val[index].path(args..);
@@ -646,10 +645,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
let hir_map = self.infcx.tcx.hir();
let def_id = self.body.source.def_id();
- let hir_id = hir_map.local_def_id_to_hir_id(def_id.as_local().unwrap());
- let node = hir_map.find(hir_id);
- let Some(hir::Node::Item(item)) = node else { return; };
- let hir::ItemKind::Fn(.., body_id) = item.kind else { return; };
+ let Some(local_def_id) = def_id.as_local() else { return };
+ let Some(body_id) = hir_map.maybe_body_owned_by(local_def_id) else { return };
let body = self.infcx.tcx.hir().body(body_id);
let mut v = V { assign_span: span, err, ty, suggested: false };
@@ -786,23 +783,12 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
// In the future, attempt in all path but initially for RHS of for_loop
fn suggest_similar_mut_method_for_for_loop(&self, err: &mut Diagnostic) {
use hir::{
- BodyId, Expr,
+ Expr,
ExprKind::{Block, Call, DropTemps, Match, MethodCall},
- HirId, ImplItem, ImplItemKind, Item, ItemKind,
};
- fn maybe_body_id_of_fn(hir_map: Map<'_>, id: HirId) -> Option<BodyId> {
- match hir_map.find(id) {
- Some(Node::Item(Item { kind: ItemKind::Fn(_, _, body_id), .. }))
- | Some(Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(_, body_id), .. })) => {
- Some(*body_id)
- }
- _ => None,
- }
- }
let hir_map = self.infcx.tcx.hir();
- let mir_body_hir_id = self.mir_hir_id();
- if let Some(fn_body_id) = maybe_body_id_of_fn(hir_map, mir_body_hir_id) {
+ if let Some(body_id) = hir_map.maybe_body_owned_by(self.mir_def_id()) {
if let Block(
hir::Block {
expr:
@@ -836,7 +822,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
..
},
_,
- ) = hir_map.body(fn_body_id).value.kind
+ ) = hir_map.body(body_id).value.kind
{
let opt_suggestions = self
.infcx
@@ -1098,46 +1084,45 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
let hir_map = self.infcx.tcx.hir();
let def_id = self.body.source.def_id();
- let hir_id = hir_map.local_def_id_to_hir_id(def_id.expect_local());
- let node = hir_map.find(hir_id);
- let hir_id = if let Some(hir::Node::Item(item)) = node
- && let hir::ItemKind::Fn(.., body_id) = item.kind
- {
- let body = hir_map.body(body_id);
- let mut v = BindingFinder {
- span: err_label_span,
- hir_id: None,
+ let hir_id = if let Some(local_def_id) = def_id.as_local() &&
+ let Some(body_id) = hir_map.maybe_body_owned_by(local_def_id)
+ {
+ let body = hir_map.body(body_id);
+ let mut v = BindingFinder {
+ span: err_label_span,
+ hir_id: None,
+ };
+ v.visit_body(body);
+ v.hir_id
+ } else {
+ None
};
- v.visit_body(body);
- v.hir_id
- } else {
- None
- };
+
if let Some(hir_id) = hir_id
&& let Some(hir::Node::Local(local)) = hir_map.find(hir_id)
- {
- let (changing, span, sugg) = match local.ty {
- Some(ty) => ("changing", ty.span, message),
- None => (
- "specifying",
- local.pat.span.shrink_to_hi(),
- format!(": {message}"),
- ),
- };
- err.span_suggestion_verbose(
- span,
- format!("consider {changing} this binding's type"),
- sugg,
- Applicability::HasPlaceholders,
- );
- } else {
- err.span_label(
- err_label_span,
- format!(
- "consider changing this binding's type to be: `{message}`"
- ),
- );
- }
+ {
+ let (changing, span, sugg) = match local.ty {
+ Some(ty) => ("changing", ty.span, message),
+ None => (
+ "specifying",
+ local.pat.span.shrink_to_hi(),
+ format!(": {message}"),
+ ),
+ };
+ err.span_suggestion_verbose(
+ span,
+ format!("consider {changing} this binding's type"),
+ sugg,
+ Applicability::HasPlaceholders,
+ );
+ } else {
+ err.span_label(
+ err_label_span,
+ format!(
+ "consider changing this binding's type to be: `{message}`"
+ ),
+ );
+ }
}
None => {}
}
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
index 617c85174..2ea399789 100644
--- a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
@@ -22,7 +22,7 @@ use rustc_infer::infer::{
};
use rustc_middle::hir::place::PlaceBase;
use rustc_middle::mir::{ConstraintCategory, ReturnConstraint};
-use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::TypeVisitor;
use rustc_middle::ty::{self, RegionVid, Ty};
use rustc_middle::ty::{Region, TyCtxt};
@@ -183,9 +183,9 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
fn is_closure_fn_mut(&self, fr: RegionVid) -> bool {
if let Some(ty::ReFree(free_region)) = self.to_error_region(fr).as_deref()
&& let ty::BoundRegionKind::BrEnv = free_region.bound_region
- && let DefiningTy::Closure(_, substs) = self.regioncx.universal_regions().defining_ty
+ && let DefiningTy::Closure(_, args) = self.regioncx.universal_regions().defining_ty
{
- return substs.as_closure().kind() == ty::ClosureKind::FnMut;
+ return args.as_closure().kind() == ty::ClosureKind::FnMut;
}
false
@@ -224,12 +224,10 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let mut hrtb_bounds = vec![];
gat_id_and_generics.iter().flatten().for_each(|(gat_hir_id, generics)| {
for pred in generics.predicates {
- let BoundPredicate(
- WhereBoundPredicate {
- bound_generic_params,
- bounds,
- ..
- }) = pred else { continue; };
+ let BoundPredicate(WhereBoundPredicate { bound_generic_params, bounds, .. }) = pred
+ else {
+ continue;
+ };
if bound_generic_params
.iter()
.rfind(|bgp| hir.local_def_id_to_hir_id(bgp.def_id) == *gat_hir_id)
@@ -504,12 +502,12 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
.to_string(),
)
}
- ty::Adt(adt, substs) => {
- let generic_arg = substs[param_index as usize];
- let identity_substs =
- InternalSubsts::identity_for_item(self.infcx.tcx, adt.did());
- let base_ty = Ty::new_adt(self.infcx.tcx, *adt, identity_substs);
- let base_generic_arg = identity_substs[param_index as usize];
+ ty::Adt(adt, args) => {
+ let generic_arg = args[param_index as usize];
+ let identity_args =
+ GenericArgs::identity_for_item(self.infcx.tcx, adt.did());
+ let base_ty = Ty::new_adt(self.infcx.tcx, *adt, identity_args);
+ let base_generic_arg = identity_args[param_index as usize];
let adt_desc = adt.descr();
let desc = format!(
@@ -522,12 +520,11 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
ty::FnDef(def_id, _) => {
let name = self.infcx.tcx.item_name(*def_id);
- let identity_substs =
- InternalSubsts::identity_for_item(self.infcx.tcx, *def_id);
+ let identity_args = GenericArgs::identity_for_item(self.infcx.tcx, *def_id);
let desc = format!("a function pointer to `{name}`");
let note = format!(
"the function `{name}` is invariant over the parameter `{}`",
- identity_substs[param_index as usize]
+ identity_args[param_index as usize]
);
(desc, note)
}
@@ -575,7 +572,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let mut output_ty = self.regioncx.universal_regions().unnormalized_output_ty;
if let ty::Alias(ty::Opaque, ty::AliasTy { def_id, .. }) = *output_ty.kind() {
- output_ty = self.infcx.tcx.type_of(def_id).subst_identity()
+ output_ty = self.infcx.tcx.type_of(def_id).instantiate_identity()
};
debug!("report_fnmut_error: output_ty={:?}", output_ty);
@@ -813,7 +810,9 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
return;
}
let suitable_region = self.infcx.tcx.is_suitable_region(f);
- let Some(suitable_region) = suitable_region else { return; };
+ let Some(suitable_region) = suitable_region else {
+ return;
+ };
let fn_returns = self.infcx.tcx.return_type_impl_or_dyn_traits(suitable_region.def_id);
@@ -848,7 +847,10 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let Some((alias_tys, alias_span, lt_addition_span)) = self
.infcx
.tcx
- .return_type_impl_or_dyn_traits_with_type_alias(suitable_region.def_id) else { return; };
+ .return_type_impl_or_dyn_traits_with_type_alias(suitable_region.def_id)
+ else {
+ return;
+ };
// in case the return type of the method is a type alias
let mut spans_suggs: Vec<_> = Vec::new();
@@ -896,14 +898,14 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let tcx = self.infcx.tcx;
let instance = if let ConstraintCategory::CallArgument(Some(func_ty)) = category {
- let (fn_did, substs) = match func_ty.kind() {
- ty::FnDef(fn_did, substs) => (fn_did, substs),
+ let (fn_did, args) = match func_ty.kind() {
+ ty::FnDef(fn_did, args) => (fn_did, args),
_ => return,
};
- debug!(?fn_did, ?substs);
+ debug!(?fn_did, ?args);
// Only suggest this on function calls, not closures
- let ty = tcx.type_of(fn_did).subst_identity();
+ let ty = tcx.type_of(fn_did).instantiate_identity();
debug!("ty: {:?}, ty.kind: {:?}", ty, ty.kind());
if let ty::Closure(_, _) = ty.kind() {
return;
@@ -913,7 +915,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
tcx,
self.param_env,
*fn_did,
- self.infcx.resolve_vars_if_possible(substs),
+ self.infcx.resolve_vars_if_possible(args),
) {
instance
} else {
@@ -932,8 +934,13 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let mut visitor = TraitObjectVisitor(FxIndexSet::default());
visitor.visit_ty(param.param_ty);
- let Some((ident, self_ty)) =
- NiceRegionError::get_impl_ident_and_self_ty_from_trait(tcx, instance.def_id(), &visitor.0) else { return; };
+ let Some((ident, self_ty)) = NiceRegionError::get_impl_ident_and_self_ty_from_trait(
+ tcx,
+ instance.def_id(),
+ &visitor.0,
+ ) else {
+ return;
+ };
self.suggest_constrain_dyn_trait_in_impl(diag, &visitor.0, ident, self_ty);
}
@@ -981,23 +988,25 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
sup: RegionVid,
) {
let (Some(sub), Some(sup)) = (self.to_error_region(sub), self.to_error_region(sup)) else {
- return
+ return;
};
let Some((ty_sub, _)) = self
.infcx
.tcx
.is_suitable_region(sub)
- .and_then(|anon_reg| find_anon_type(self.infcx.tcx, sub, &anon_reg.boundregion)) else {
- return
+ .and_then(|anon_reg| find_anon_type(self.infcx.tcx, sub, &anon_reg.boundregion))
+ else {
+ return;
};
let Some((ty_sup, _)) = self
.infcx
.tcx
.is_suitable_region(sup)
- .and_then(|anon_reg| find_anon_type(self.infcx.tcx, sup, &anon_reg.boundregion)) else {
- return
+ .and_then(|anon_reg| find_anon_type(self.infcx.tcx, sup, &anon_reg.boundregion))
+ else {
+ return;
};
suggest_adding_lifetime_params(self.infcx.tcx, sub, ty_sup, ty_sub, diag);
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_name.rs b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
index 074f37bed..337af89b2 100644
--- a/compiler/rustc_borrowck/src/diagnostics/region_name.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
@@ -5,8 +5,8 @@ use rustc_errors::Diagnostic;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_middle::ty::print::RegionHighlightMode;
-use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::{self, RegionVid, Ty};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
@@ -321,18 +321,18 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
ty::BoundRegionKind::BrEnv => {
let def_ty = self.regioncx.universal_regions().defining_ty;
- let DefiningTy::Closure(_, substs) = def_ty else {
+ let DefiningTy::Closure(_, args) = def_ty else {
// Can't have BrEnv in functions, constants or generators.
bug!("BrEnv outside of closure.");
};
- let hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. })
- = tcx.hir().expect_expr(self.mir_hir_id()).kind
+ let hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. }) =
+ tcx.hir().expect_expr(self.mir_hir_id()).kind
else {
bug!("Closure is not defined by a closure expr");
};
let region_name = self.synthesize_region_name();
- let closure_kind_ty = substs.as_closure().kind_ty();
+ let closure_kind_ty = args.as_closure().kind_ty();
let note = match closure_kind_ty.to_opt_closure_kind() {
Some(ty::ClosureKind::Fn) => {
"closure implements `Fn`, so references to captured variables \
@@ -510,20 +510,17 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
}
// Match up something like `Foo<'1>`
- (
- ty::Adt(_adt_def, substs),
- hir::TyKind::Path(hir::QPath::Resolved(None, path)),
- ) => {
+ (ty::Adt(_adt_def, args), hir::TyKind::Path(hir::QPath::Resolved(None, path))) => {
match path.res {
// Type parameters of the type alias have no reason to
// be the same as those of the ADT.
// FIXME: We should be able to do something similar to
// match_adt_and_segment in this case.
- Res::Def(DefKind::TyAlias, _) => (),
+ Res::Def(DefKind::TyAlias { .. }, _) => (),
_ => {
if let Some(last_segment) = path.segments.last() {
if let Some(highlight) = self.match_adt_and_segment(
- substs,
+ args,
needle_fr,
last_segment,
search_stack,
@@ -560,22 +557,22 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
None
}
- /// We've found an enum/struct/union type with the substitutions
- /// `substs` and -- in the HIR -- a path type with the final
+ /// We've found an enum/struct/union type with the generic args
+ /// `args` and -- in the HIR -- a path type with the final
/// segment `last_segment`. Try to find a `'_` to highlight in
/// the generic args (or, if not, to produce new zipped pairs of
/// types+hir to search through).
fn match_adt_and_segment<'hir>(
&self,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
needle_fr: RegionVid,
last_segment: &'hir hir::PathSegment<'hir>,
search_stack: &mut Vec<(Ty<'tcx>, &'hir hir::Ty<'hir>)>,
) -> Option<RegionNameHighlight> {
// Did the user give explicit arguments? (e.g., `Foo<..>`)
- let args = last_segment.args.as_ref()?;
+ let explicit_args = last_segment.args.as_ref()?;
let lifetime =
- self.try_match_adt_and_generic_args(substs, needle_fr, args, search_stack)?;
+ self.try_match_adt_and_generic_args(args, needle_fr, explicit_args, search_stack)?;
if lifetime.is_anonymous() {
None
} else {
@@ -583,19 +580,19 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
}
}
- /// We've found an enum/struct/union type with the substitutions
- /// `substs` and -- in the HIR -- a path with the generic
- /// arguments `args`. If `needle_fr` appears in the args, return
+ /// We've found an enum/struct/union type with the generic args
+ /// `args` and -- in the HIR -- a path with the generic
+ /// arguments `hir_args`. If `needle_fr` appears in the args, return
/// the `hir::Lifetime` that corresponds to it. If not, push onto
/// `search_stack` the types+hir to search through.
fn try_match_adt_and_generic_args<'hir>(
&self,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
needle_fr: RegionVid,
- args: &'hir hir::GenericArgs<'hir>,
+ hir_args: &'hir hir::GenericArgs<'hir>,
search_stack: &mut Vec<(Ty<'tcx>, &'hir hir::Ty<'hir>)>,
) -> Option<&'hir hir::Lifetime> {
- for (kind, hir_arg) in iter::zip(substs, args.args) {
+ for (kind, hir_arg) in iter::zip(args, hir_args.args) {
match (kind.unpack(), hir_arg) {
(GenericArgKind::Lifetime(r), hir::GenericArg::Lifetime(lt)) => {
if r.as_var() == needle_fr {
@@ -849,9 +846,10 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
return None;
};
- let found = tcx.any_free_region_meets(&tcx.type_of(region_parent).subst_identity(), |r| {
- *r == ty::ReEarlyBound(region)
- });
+ let found = tcx
+ .any_free_region_meets(&tcx.type_of(region_parent).instantiate_identity(), |r| {
+ *r == ty::ReEarlyBound(region)
+ });
Some(RegionName {
name: self.synthesize_region_name(),
@@ -888,6 +886,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
.universal_regions()
.defining_ty
.upvar_tys()
+ .iter()
.position(|ty| self.any_param_predicate_mentions(&predicates, ty, region))
{
let (upvar_name, upvar_span) = self.regioncx.get_upvar_name_and_span_for_region(
diff --git a/compiler/rustc_borrowck/src/diagnostics/var_name.rs b/compiler/rustc_borrowck/src/diagnostics/var_name.rs
index 98418e237..8832d345d 100644
--- a/compiler/rustc_borrowck/src/diagnostics/var_name.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/var_name.rs
@@ -43,7 +43,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
fr: RegionVid,
) -> Option<usize> {
let upvar_index =
- self.universal_regions().defining_ty.upvar_tys().position(|upvar_ty| {
+ self.universal_regions().defining_ty.upvar_tys().iter().position(|upvar_ty| {
debug!("get_upvar_index_for_region: upvar_ty={upvar_ty:?}");
tcx.any_free_region_meets(&upvar_ty, |r| {
let r = r.as_var();
@@ -52,7 +52,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
})
})?;
- let upvar_ty = self.universal_regions().defining_ty.upvar_tys().nth(upvar_index);
+ let upvar_ty = self.universal_regions().defining_ty.upvar_tys().get(upvar_index);
debug!(
"get_upvar_index_for_region: found {fr:?} in upvar {upvar_index} which has type {upvar_ty:?}",
diff --git a/compiler/rustc_borrowck/src/facts.rs b/compiler/rustc_borrowck/src/facts.rs
index 87fad9a35..9916ebca3 100644
--- a/compiler/rustc_borrowck/src/facts.rs
+++ b/compiler/rustc_borrowck/src/facts.rs
@@ -202,7 +202,7 @@ trait FactCell {
impl<A: Debug> FactCell for A {
default fn to_string(&self, _location_table: &LocationTable) -> String {
- format!("{:?}", self)
+ format!("{self:?}")
}
}
diff --git a/compiler/rustc_borrowck/src/invalidation.rs b/compiler/rustc_borrowck/src/invalidation.rs
index 0152d89eb..df5e383ad 100644
--- a/compiler/rustc_borrowck/src/invalidation.rs
+++ b/compiler/rustc_borrowck/src/invalidation.rs
@@ -353,7 +353,6 @@ impl<'cx, 'tcx> InvalidationGenerator<'cx, 'tcx> {
let tcx = self.tcx;
let body = self.body;
let borrow_set = self.borrow_set;
- let indices = self.borrow_set.indices();
each_borrow_involving_path(
self,
tcx,
@@ -361,7 +360,7 @@ impl<'cx, 'tcx> InvalidationGenerator<'cx, 'tcx> {
location,
(sd, place),
borrow_set,
- indices,
+ |_| true,
|this, borrow_index, borrow| {
match (rw, borrow.kind) {
// Obviously an activation is compatible with its own
diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs
index 97d15cb53..efe525c22 100644
--- a/compiler/rustc_borrowck/src/lib.rs
+++ b/compiler/rustc_borrowck/src/lib.rs
@@ -11,6 +11,7 @@
#![feature(trusted_step)]
#![feature(try_blocks)]
#![recursion_limit = "256"]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate rustc_middle;
@@ -23,7 +24,7 @@ use rustc_errors::{Diagnostic, DiagnosticBuilder, DiagnosticMessage, Subdiagnost
use rustc_fluent_macro::fluent_messages;
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
-use rustc_index::bit_set::ChunkedBitSet;
+use rustc_index::bit_set::{BitSet, ChunkedBitSet};
use rustc_index::{IndexSlice, IndexVec};
use rustc_infer::infer::{
InferCtxt, NllRegionVariableOrigin, RegionVariableOrigin, TyCtxtInferExt,
@@ -42,7 +43,6 @@ use rustc_session::lint::builtin::UNUSED_MUT;
use rustc_span::{Span, Symbol};
use rustc_target::abi::FieldIdx;
-use either::Either;
use smallvec::SmallVec;
use std::cell::RefCell;
use std::collections::BTreeMap;
@@ -301,7 +301,7 @@ fn do_mir_borrowck<'tcx>(
let movable_generator =
// The first argument is the generator type passed by value
if let Some(local) = body.local_decls.raw.get(1)
- // Get the interior types and substs which typeck computed
+ // Get the interior types and args which typeck computed
&& let ty::Generator(_, _, hir::Movability::Static) = local.ty.kind()
{
false
@@ -1035,12 +1035,16 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let borrow_set = self.borrow_set.clone();
// Use polonius output if it has been enabled.
- let polonius_output = self.polonius_output.clone();
- let borrows_in_scope = if let Some(polonius) = &polonius_output {
+ let mut polonius_output;
+ let borrows_in_scope = if let Some(polonius) = &self.polonius_output {
let location = self.location_table.start_index(location);
- Either::Left(polonius.errors_at(location).iter().copied())
+ polonius_output = BitSet::new_empty(borrow_set.len());
+ for &idx in polonius.errors_at(location) {
+ polonius_output.insert(idx);
+ }
+ &polonius_output
} else {
- Either::Right(flow_state.borrows.iter())
+ &flow_state.borrows
};
each_borrow_involving_path(
@@ -1050,7 +1054,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
location,
(sd, place_span.0),
&borrow_set,
- borrows_in_scope,
+ |borrow_index| borrows_in_scope.contains(borrow_index),
|this, borrow_index, borrow| match (rw, borrow.kind) {
// Obviously an activation is compatible with its own
// reservation (or even prior activating uses of same
@@ -1817,8 +1821,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
ProjectionElem::Subslice { .. } => {
- panic!("we don't allow assignments to subslices, location: {:?}",
- location);
+ panic!("we don't allow assignments to subslices, location: {location:?}");
}
ProjectionElem::Field(..) => {
@@ -2017,8 +2020,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
self.infcx.tcx.sess.delay_span_bug(
span,
format!(
- "Accessing `{:?}` with the kind `{:?}` shouldn't be possible",
- place, kind,
+ "Accessing `{place:?}` with the kind `{kind:?}` shouldn't be possible",
),
);
}
@@ -2306,11 +2308,10 @@ mod error {
pub fn buffer_error(&mut self, t: DiagnosticBuilder<'_, ErrorGuaranteed>) {
if let None = self.tainted_by_errors {
- self.tainted_by_errors = Some(
- self.tcx
- .sess
- .delay_span_bug(t.span.clone(), "diagnostic buffered but not emitted"),
- )
+ self.tainted_by_errors = Some(self.tcx.sess.delay_span_bug(
+ t.span.clone_ignoring_labels(),
+ "diagnostic buffered but not emitted",
+ ))
}
t.buffer(&mut self.buffered);
}
diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs
index b5014a3f4..679a19710 100644
--- a/compiler/rustc_borrowck/src/nll.rs
+++ b/compiler/rustc_borrowck/src/nll.rs
@@ -347,7 +347,7 @@ pub(super) fn dump_mir_results<'tcx>(
for_each_region_constraint(
infcx.tcx,
closure_region_requirements,
- &mut |msg| writeln!(out, "| {}", msg),
+ &mut |msg| writeln!(out, "| {msg}"),
)?;
writeln!(out, "|")?;
}
@@ -426,7 +426,7 @@ pub(super) fn dump_annotation<'tcx>(
};
if !opaque_type_values.is_empty() {
- err.note(format!("Inferred opaque type values:\n{:#?}", opaque_type_values));
+ err.note(format!("Inferred opaque type values:\n{opaque_type_values:#?}"));
}
errors.buffer_non_error_diag(err);
@@ -439,7 +439,7 @@ fn for_each_region_constraint<'tcx>(
) -> io::Result<()> {
for req in &closure_region_requirements.outlives_requirements {
let subject = match req.subject {
- ClosureOutlivesSubject::Region(subject) => format!("{:?}", subject),
+ ClosureOutlivesSubject::Region(subject) => format!("{subject:?}"),
ClosureOutlivesSubject::Ty(ty) => {
format!("{:?}", ty.instantiate(tcx, |vid| ty::Region::new_var(tcx, vid)))
}
diff --git a/compiler/rustc_borrowck/src/path_utils.rs b/compiler/rustc_borrowck/src/path_utils.rs
index ea9f8683c..ed93a5609 100644
--- a/compiler/rustc_borrowck/src/path_utils.rs
+++ b/compiler/rustc_borrowck/src/path_utils.rs
@@ -33,20 +33,24 @@ pub(super) fn each_borrow_involving_path<'tcx, F, I, S>(
_location: Location,
access_place: (AccessDepth, Place<'tcx>),
borrow_set: &BorrowSet<'tcx>,
- candidates: I,
+ is_candidate: I,
mut op: F,
) where
F: FnMut(&mut S, BorrowIndex, &BorrowData<'tcx>) -> Control,
- I: Iterator<Item = BorrowIndex>,
+ I: Fn(BorrowIndex) -> bool,
{
let (access, place) = access_place;
- // FIXME: analogous code in check_loans first maps `place` to
- // its base_path.
+ // The number of candidates can be large, but borrows for different locals cannot conflict with
+ // each other, so we restrict the working set a priori.
+ let Some(borrows_for_place_base) = borrow_set.local_map.get(&place.local) else { return };
// check for loan restricting path P being used. Accounts for
// borrows of P, P.a.b, etc.
- for i in candidates {
+ for &i in borrows_for_place_base {
+ if !is_candidate(i) {
+ continue;
+ }
let borrowed = &borrow_set[i];
if places_conflict::borrow_conflicts_with_place(
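The path_utils.rs change above replaces the `candidates` iterator with an `is_candidate` predicate and, before applying it, narrows the search to borrows of the accessed place's base local via `borrow_set.local_map`, since borrows of different locals can never conflict. A simplified model of that two-stage filter, using plain std types rather than the real `BorrowSet`:

```rust
// Simplified model of the new filtering scheme: first restrict to borrows whose
// base local matches the accessed place, then apply the caller's `is_candidate`
// predicate. `Local`, `BorrowIndex`, and `local_map` are stand-ins for the real
// borrowck types.
use std::collections::HashMap;

type Local = u32;
type BorrowIndex = usize;

fn each_borrow_involving_path(
    local_map: &HashMap<Local, Vec<BorrowIndex>>,
    accessed_local: Local,
    is_candidate: impl Fn(BorrowIndex) -> bool,
    mut op: impl FnMut(BorrowIndex),
) {
    // Borrows of other locals cannot conflict, so skip them up front.
    let Some(borrows_for_local) = local_map.get(&accessed_local) else { return };
    for &i in borrows_for_local {
        // The caller decides which of the remaining borrows are in scope here.
        if is_candidate(i) {
            op(i);
        }
    }
}

fn main() {
    let local_map: HashMap<Local, Vec<BorrowIndex>> =
        HashMap::from([(0, vec![2, 5]), (1, vec![7])]);
    let in_scope = [2, 7]; // e.g. borrows live at the current location
    each_borrow_involving_path(
        &local_map,
        0,
        |i| in_scope.contains(&i),
        |i| println!("check conflict against borrow {i}"),
    );
}
```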
diff --git a/compiler/rustc_borrowck/src/places_conflict.rs b/compiler/rustc_borrowck/src/places_conflict.rs
index 1217dcb9c..c02f6f3b6 100644
--- a/compiler/rustc_borrowck/src/places_conflict.rs
+++ b/compiler/rustc_borrowck/src/places_conflict.rs
@@ -1,3 +1,55 @@
+//! The borrowck rules for proving disjointness are applied from the "root" of the
+//! borrow forwards, iterating over "similar" projections in lockstep until
+//! we can prove overlap one way or another. Essentially, we treat `Overlap` as
+//! a monoid and report a conflict if the product ends up not being `Disjoint`.
+//!
+//! At each step, if we didn't run out of borrow or place, we know that our elements
+//! have the same type, and that they only overlap if they are identical.
+//!
+//! For example, if we are comparing these:
+//! ```text
+//! BORROW: (*x1[2].y).z.a
+//! ACCESS: (*x1[i].y).w.b
+//! ```
+//!
+//! Then our steps are:
+//! ```text
+//! x1 | x1 -- places are the same
+//! x1[2] | x1[i] -- equal or disjoint (disjoint if indexes differ)
+//! x1[2].y | x1[i].y -- equal or disjoint
+//! *x1[2].y | *x1[i].y -- equal or disjoint
+//! (*x1[2].y).z | (*x1[i].y).w -- we are disjoint and don't need to check more!
+//! ```
+//!
+//! Because `zip` does potentially bad things to the iterator inside, this loop
+//! also handles the case where the access might be a *prefix* of the borrow, e.g.
+//!
+//! ```text
+//! BORROW: (*x1[2].y).z.a
+//! ACCESS: x1[i].y
+//! ```
+//!
+//! Then our steps are:
+//! ```text
+//! x1 | x1 -- places are the same
+//! x1[2] | x1[i] -- equal or disjoint (disjoint if indexes differ)
+//! x1[2].y | x1[i].y -- equal or disjoint
+//! ```
+//!
+//! -- here we run out of access - the borrow can access a part of it. If this
+//! is a full deep access, then we *know* the borrow conflicts with it. However,
+//! if the access is shallow, then we can proceed:
+//!
+//! ```text
+//! x1[2].y | (*x1[i].y) -- a deref! the access can't get past this, so we
+//! are disjoint
+//! ```
+//!
+//! Our invariant is, that at each step of the iteration:
+//! - If we didn't run out of access to match, our borrow and access are comparable
+//! and either equal or disjoint.
+//! - If we did run out of access, the borrow can access a part of it.
+
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
use crate::ArtificialField;
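The module comment added above describes the core algorithm: walk the borrow's and the access's projections in lockstep, folding per-element `Overlap` results until disjointness is proven or the shorter place runs out. A toy version of that walk; `Proj` and `Overlap` are simplified stand-ins for rustc's `PlaceElem` and `Overlap`, and the real code additionally handles access depth and the index-comparison bias:

```rust
// Toy version of the lockstep walk described in the doc comment above. The real
// implementation also distinguishes shallow vs. deep accesses and applies a
// `PlaceConflictBias` when runtime indices cannot be compared.
#[derive(Clone, Copy)]
enum Proj {
    Field(u32),
    ConstIndex(u64),
    Deref,
}

enum Overlap {
    EqualOrDisjoint, // keep walking
    Disjoint,        // proven disjoint, stop
}

fn elem_overlap(a: Proj, b: Proj) -> Overlap {
    match (a, b) {
        (Proj::Field(f1), Proj::Field(f2)) if f1 != f2 => Overlap::Disjoint,
        (Proj::ConstIndex(i1), Proj::ConstIndex(i2)) if i1 != i2 => Overlap::Disjoint,
        _ => Overlap::EqualOrDisjoint,
    }
}

/// Returns true if the borrow may conflict with the access (same base local assumed).
fn may_conflict(borrow: &[Proj], access: &[Proj]) -> bool {
    for (&b, &a) in borrow.iter().zip(access.iter()) {
        if let Overlap::Disjoint = elem_overlap(b, a) {
            // One disjoint step makes the whole product disjoint.
            return false;
        }
    }
    // One place is a prefix of the other (or they are equal): treat this as a
    // conflict here; the real code refines it using the access depth.
    true
}

fn main() {
    // BORROW: x.0.2   ACCESS: x.0.3  -> fields differ at the last step, disjoint.
    let borrow = [Proj::Field(0), Proj::Field(2)];
    let access = [Proj::Field(0), Proj::Field(3)];
    println!("conflict: {}", may_conflict(&borrow, &access));

    // BORROW: (*x[2]).1   ACCESS: x[2]  -> access is a prefix, may conflict.
    let borrow = [Proj::ConstIndex(2), Proj::Deref, Proj::Field(1)];
    let access = [Proj::ConstIndex(2)];
    println!("conflict: {}", may_conflict(&borrow, &access));
}
```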
@@ -5,7 +57,7 @@ use crate::Overlap;
use crate::{AccessDepth, Deep, Shallow};
use rustc_hir as hir;
use rustc_middle::mir::{
- Body, BorrowKind, Local, MutBorrowKind, Place, PlaceElem, PlaceRef, ProjectionElem,
+ Body, BorrowKind, MutBorrowKind, Place, PlaceElem, PlaceRef, ProjectionElem,
};
use rustc_middle::ty::{self, TyCtxt};
use std::cmp::max;
@@ -48,7 +100,7 @@ pub fn places_conflict<'tcx>(
/// access depth. The `bias` parameter is used to determine how the unknowable (comparing runtime
/// array indices, for example) should be interpreted - this depends on what the caller wants in
/// order to make the conservative choice and preserve soundness.
-#[instrument(level = "debug", skip(tcx, body))]
+#[inline]
pub(super) fn borrow_conflicts_with_place<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
@@ -58,15 +110,24 @@ pub(super) fn borrow_conflicts_with_place<'tcx>(
access: AccessDepth,
bias: PlaceConflictBias,
) -> bool {
+ let borrow_local = borrow_place.local;
+ let access_local = access_place.local;
+
+ if borrow_local != access_local {
+ // We have proven the borrow disjoint - further projections will remain disjoint.
+ return false;
+ }
+
// This Local/Local case is handled by the more general code below, but
// it's so common that it's a speed win to check for it first.
- if let Some(l1) = borrow_place.as_local() && let Some(l2) = access_place.as_local() {
- return l1 == l2;
+ if borrow_place.projection.is_empty() && access_place.projection.is_empty() {
+ return true;
}
place_components_conflict(tcx, body, borrow_place, borrow_kind, access_place, access, bias)
}
+#[instrument(level = "debug", skip(tcx, body))]
fn place_components_conflict<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
@@ -76,65 +137,10 @@ fn place_components_conflict<'tcx>(
access: AccessDepth,
bias: PlaceConflictBias,
) -> bool {
- // The borrowck rules for proving disjointness are applied from the "root" of the
- // borrow forwards, iterating over "similar" projections in lockstep until
- // we can prove overlap one way or another. Essentially, we treat `Overlap` as
- // a monoid and report a conflict if the product ends up not being `Disjoint`.
- //
- // At each step, if we didn't run out of borrow or place, we know that our elements
- // have the same type, and that they only overlap if they are the identical.
- //
- // For example, if we are comparing these:
- // BORROW: (*x1[2].y).z.a
- // ACCESS: (*x1[i].y).w.b
- //
- // Then our steps are:
- // x1 | x1 -- places are the same
- // x1[2] | x1[i] -- equal or disjoint (disjoint if indexes differ)
- // x1[2].y | x1[i].y -- equal or disjoint
- // *x1[2].y | *x1[i].y -- equal or disjoint
- // (*x1[2].y).z | (*x1[i].y).w -- we are disjoint and don't need to check more!
- //
- // Because `zip` does potentially bad things to the iterator inside, this loop
- // also handles the case where the access might be a *prefix* of the borrow, e.g.
- //
- // BORROW: (*x1[2].y).z.a
- // ACCESS: x1[i].y
- //
- // Then our steps are:
- // x1 | x1 -- places are the same
- // x1[2] | x1[i] -- equal or disjoint (disjoint if indexes differ)
- // x1[2].y | x1[i].y -- equal or disjoint
- //
- // -- here we run out of access - the borrow can access a part of it. If this
- // is a full deep access, then we *know* the borrow conflicts with it. However,
- // if the access is shallow, then we can proceed:
- //
- // x1[2].y | (*x1[i].y) -- a deref! the access can't get past this, so we
- // are disjoint
- //
- // Our invariant is, that at each step of the iteration:
- // - If we didn't run out of access to match, our borrow and access are comparable
- // and either equal or disjoint.
- // - If we did run out of access, the borrow can access a part of it.
-
let borrow_local = borrow_place.local;
let access_local = access_place.local;
-
- match place_base_conflict(borrow_local, access_local) {
- Overlap::Arbitrary => {
- bug!("Two base can't return Arbitrary");
- }
- Overlap::EqualOrDisjoint => {
- // This is the recursive case - proceed to the next element.
- }
- Overlap::Disjoint => {
- // We have proven the borrow disjoint - further
- // projections will remain disjoint.
- debug!("borrow_conflicts_with_place: disjoint");
- return false;
- }
- }
+ // borrow_conflicts_with_place should have checked that.
+ assert_eq!(borrow_local, access_local);
// loop invariant: borrow_c is always either equal to access_c or disjoint from it.
for ((borrow_place, borrow_c), &access_c) in
@@ -280,21 +286,6 @@ fn place_components_conflict<'tcx>(
// Given that the bases of `elem1` and `elem2` are always either equal
// or disjoint (and have the same type!), return the overlap situation
// between `elem1` and `elem2`.
-fn place_base_conflict(l1: Local, l2: Local) -> Overlap {
- if l1 == l2 {
- // the same local - base case, equal
- debug!("place_element_conflict: DISJOINT-OR-EQ-LOCAL");
- Overlap::EqualOrDisjoint
- } else {
- // different locals - base case, disjoint
- debug!("place_element_conflict: DISJOINT-LOCAL");
- Overlap::Disjoint
- }
-}
-
-// Given that the bases of `elem1` and `elem2` are always either equal
-// or disjoint (and have the same type!), return the overlap situation
-// between `elem1` and `elem2`.
fn place_projection_conflict<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
diff --git a/compiler/rustc_borrowck/src/region_infer/dump_mir.rs b/compiler/rustc_borrowck/src/region_infer/dump_mir.rs
index 6524b594e..4d620ac9d 100644
--- a/compiler/rustc_borrowck/src/region_infer/dump_mir.rs
+++ b/compiler/rustc_borrowck/src/region_infer/dump_mir.rs
@@ -52,7 +52,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
writeln!(out, "|")?;
writeln!(out, "| Inference Constraints")?;
- self.for_each_constraint(tcx, &mut |msg| writeln!(out, "| {}", msg))?;
+ self.for_each_constraint(tcx, &mut |msg| writeln!(out, "| {msg}"))?;
Ok(())
}
@@ -69,7 +69,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
for region in self.definitions.indices() {
let value = self.liveness_constraints.region_value_str(region);
if value != "{}" {
- with_msg(&format!("{:?} live at {}", region, value))?;
+ with_msg(&format!("{region:?} live at {value}"))?;
}
}
@@ -81,12 +81,9 @@ impl<'tcx> RegionInferenceContext<'tcx> {
Locations::All(span) => {
("All", tcx.sess.source_map().span_to_embeddable_string(*span))
}
- Locations::Single(loc) => ("Single", format!("{:?}", loc)),
+ Locations::Single(loc) => ("Single", format!("{loc:?}")),
};
- with_msg(&format!(
- "{:?}: {:?} due to {:?} at {}({}) ({:?}",
- sup, sub, category, name, arg, span
- ))?;
+ with_msg(&format!("{sup:?}: {sub:?} due to {category:?} at {name}({arg}) ({span:?}"))?;
}
Ok(())
diff --git a/compiler/rustc_borrowck/src/region_infer/graphviz.rs b/compiler/rustc_borrowck/src/region_infer/graphviz.rs
index 2e15586e0..a0cf22e93 100644
--- a/compiler/rustc_borrowck/src/region_infer/graphviz.rs
+++ b/compiler/rustc_borrowck/src/region_infer/graphviz.rs
@@ -49,7 +49,7 @@ impl<'a, 'this, 'tcx> dot::Labeller<'this> for RawConstraints<'a, 'tcx> {
Some(dot::LabelText::LabelStr(Cow::Borrowed("box")))
}
fn node_label(&'this self, n: &RegionVid) -> dot::LabelText<'this> {
- dot::LabelText::LabelStr(format!("{:?}", n).into())
+ dot::LabelText::LabelStr(format!("{n:?}").into())
}
fn edge_label(&'this self, e: &OutlivesConstraint<'tcx>) -> dot::LabelText<'this> {
dot::LabelText::LabelStr(format!("{:?}", e.locations).into())
@@ -100,7 +100,7 @@ impl<'a, 'this, 'tcx> dot::Labeller<'this> for SccConstraints<'a, 'tcx> {
}
fn node_label(&'this self, n: &ConstraintSccIndex) -> dot::LabelText<'this> {
let nodes = &self.nodes_per_scc[*n];
- dot::LabelText::LabelStr(format!("{:?} = {:?}", n, nodes).into())
+ dot::LabelText::LabelStr(format!("{n:?} = {nodes:?}").into())
}
}
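Several hunks above and below migrate format strings to Rust 2021 inlined format arguments (e.g. `format!("{:?}", n)` becomes `format!("{n:?}")`). A minimal standalone sketch of the pattern, using only the standard library and made-up stand-in values rather than compiler types:

fn main() {
    let region = 3;           // stand-in for a RegionVid-like value
    let value = "all(bb0)";   // stand-in for a region value string
    // Positional arguments, as in the removed lines:
    let old = format!("{:?} live at {}", region, value);
    // Inlined (captured) identifiers, as in the added lines:
    let new = format!("{region:?} live at {value}");
    assert_eq!(old, new);     // both render the same text
}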
diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs
index e45d3a2c8..b8cd94e54 100644
--- a/compiler/rustc_borrowck/src/region_infer/mod.rs
+++ b/compiler/rustc_borrowck/src/region_infer/mod.rs
@@ -259,7 +259,7 @@ fn sccs_info<'cx, 'tcx>(
let mut reg_vars_to_origins_str = "region variables to origins:\n".to_string();
for (reg_var, origin) in var_to_origin_sorted.into_iter() {
- reg_vars_to_origins_str.push_str(&format!("{:?}: {:?}\n", reg_var, origin));
+ reg_vars_to_origins_str.push_str(&format!("{reg_var:?}: {origin:?}\n"));
}
debug!("{}", reg_vars_to_origins_str);
@@ -784,13 +784,20 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// is considered a *lower bound*. If possible, we will modify
/// the constraint to set it equal to one of the option regions.
/// If we make any changes, returns true, else false.
+ ///
+ /// This function only adds the member constraints to the region graph,
+ /// it does not check them. They are later checked in
+ /// `check_member_constraints` after the region graph has been computed.
#[instrument(skip(self, member_constraint_index), level = "debug")]
fn apply_member_constraint(
&mut self,
scc: ConstraintSccIndex,
member_constraint_index: NllMemberConstraintIndex,
choice_regions: &[ty::RegionVid],
- ) -> bool {
+ ) {
+ // Lazily compute the reverse graph, we'll need it later.
+ self.compute_reverse_scc_graph();
+
// Create a mutable vector of the options. We'll try to winnow
// them down.
let mut choice_regions: Vec<ty::RegionVid> = choice_regions.to_vec();
@@ -805,10 +812,11 @@ impl<'tcx> RegionInferenceContext<'tcx> {
*c_r = self.scc_representatives[scc];
}
- // The 'member region' in a member constraint is part of the
- // hidden type, which must be in the root universe. Therefore,
- // it cannot have any placeholders in its value.
- assert!(self.scc_universes[scc] == ty::UniverseIndex::ROOT);
+ // If the member region lives in a higher universe, we currently choose
+ // the most conservative option by leaving it unchanged.
+ if self.scc_universes[scc] != ty::UniverseIndex::ROOT {
+ return;
+ }
debug_assert!(
self.scc_values.placeholders_contained_in(scc).next().is_none(),
"scc {:?} in a member constraint has placeholder value: {:?}",
@@ -832,7 +840,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
// free region that must outlive the member region `R0` (`UB:
// R0`). Therefore, we need only keep an option `O` if `UB: O`
// for all UB.
- self.compute_reverse_scc_graph();
let universal_region_relations = &self.universal_region_relations;
for ub in self.rev_scc_graph.as_ref().unwrap().upper_bounds(scc) {
debug!(?ub);
@@ -867,7 +874,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
}) else {
debug!("no unique minimum choice");
- return false;
+ return;
};
let min_choice_scc = self.constraint_sccs.scc(min_choice);
@@ -878,10 +885,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
min_choice,
member_constraint_index,
});
-
- true
- } else {
- false
}
}
@@ -1115,7 +1118,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
) -> Option<ClosureOutlivesSubject<'tcx>> {
let tcx = infcx.tcx;
- // Opaque types' substs may include useless lifetimes.
+ // Opaque types' args may include useless lifetimes.
// We will replace them with ReStatic.
struct OpaqueFolder<'tcx> {
tcx: TyCtxt<'tcx>,
@@ -1127,19 +1130,18 @@ impl<'tcx> RegionInferenceContext<'tcx> {
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
use ty::TypeSuperFoldable as _;
let tcx = self.tcx;
- let &ty::Alias(ty::Opaque, ty::AliasTy { substs, def_id, .. }) = t.kind() else {
+ let &ty::Alias(ty::Opaque, ty::AliasTy { args, def_id, .. }) = t.kind() else {
return t.super_fold_with(self);
};
- let substs =
- std::iter::zip(substs, tcx.variances_of(def_id)).map(|(arg, v)| {
- match (arg.unpack(), v) {
- (ty::GenericArgKind::Lifetime(_), ty::Bivariant) => {
- tcx.lifetimes.re_static.into()
- }
- _ => arg.fold_with(self),
+ let args = std::iter::zip(args, tcx.variances_of(def_id)).map(|(arg, v)| {
+ match (arg.unpack(), v) {
+ (ty::GenericArgKind::Lifetime(_), ty::Bivariant) => {
+ tcx.lifetimes.re_static.into()
}
- });
- Ty::new_opaque(tcx, def_id, tcx.mk_substs_from_iter(substs))
+ _ => arg.fold_with(self),
+ }
+ });
+ Ty::new_opaque(tcx, def_id, tcx.mk_args_from_iter(args))
}
}
@@ -2058,10 +2060,17 @@ impl<'tcx> RegionInferenceContext<'tcx> {
let mut extra_info = vec![];
for constraint in path.iter() {
let outlived = constraint.sub;
- let Some(origin) = self.var_infos.get(outlived) else { continue; };
- let RegionVariableOrigin::Nll(NllRegionVariableOrigin::Placeholder(p)) = origin.origin else { continue; };
+ let Some(origin) = self.var_infos.get(outlived) else {
+ continue;
+ };
+ let RegionVariableOrigin::Nll(NllRegionVariableOrigin::Placeholder(p)) = origin.origin
+ else {
+ continue;
+ };
debug!(?constraint, ?p);
- let ConstraintCategory::Predicate(span) = constraint.category else { continue; };
+ let ConstraintCategory::Predicate(span) = constraint.category else {
+ continue;
+ };
extra_info.push(ExtraConstraintInfo::PlaceholderFromPredicate(span));
// We only want to point to one
break;
diff --git a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
index 1a227f2d1..4da7b6025 100644
--- a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
+++ b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
@@ -6,9 +6,9 @@ use rustc_infer::infer::InferCtxt;
use rustc_infer::infer::TyCtxtInferExt as _;
use rustc_infer::traits::{Obligation, ObligationCause};
use rustc_middle::traits::DefiningAnchor;
-use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
use rustc_middle::ty::visit::TypeVisitableExt;
use rustc_middle::ty::{self, OpaqueHiddenType, OpaqueTypeKey, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{GenericArgKind, GenericArgs};
use rustc_span::Span;
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
use rustc_trait_selection::traits::ObligationCtxt;
@@ -38,15 +38,15 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// back to concrete lifetimes: `'static`, `ReEarlyBound` or `ReFree`.
///
/// First we map all the lifetimes in the concrete type to an equal
- /// universal region that occurs in the concrete type's substs, in this case
- /// this would result in `&'1 i32`. We only consider regions in the substs
+ /// universal region that occurs in the concrete type's args, in this case
+ /// this would result in `&'1 i32`. We only consider regions in the args
/// in case there is an equal region that does not. For example, this should
/// be allowed:
/// `fn f<'a: 'b, 'b: 'a>(x: *mut &'b i32) -> impl Sized + 'a { x }`
///
/// Then we map the regions in both the type and the subst to their
/// `external_name` giving `concrete_type = &'a i32`,
- /// `substs = ['static, 'a]`. This will then allow
+ /// `args = ['static, 'a]`. This will then allow
/// `infer_opaque_definition_from_instantiation` to determine that
/// `_Return<'_a> = &'_a i32`.
///
@@ -73,8 +73,8 @@ impl<'tcx> RegionInferenceContext<'tcx> {
debug!(?member_constraints);
for (opaque_type_key, concrete_type) in opaque_ty_decls {
- let substs = opaque_type_key.substs;
- debug!(?concrete_type, ?substs);
+ let args = opaque_type_key.args;
+ debug!(?concrete_type, ?args);
let mut subst_regions = vec![self.universal_regions.fr_static];
@@ -95,7 +95,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
ty::Region::new_error_with_message(
infcx.tcx,
concrete_type.span,
- "opaque type with non-universal region substs",
+ "opaque type with non-universal region args",
)
}
}
@@ -110,17 +110,17 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
debug!(?subst_regions);
- // Next, insert universal regions from substs, so we can translate regions that appear
- // in them but are not subject to member constraints, for instance closure substs.
- let universal_substs = infcx.tcx.fold_regions(substs, |region, _| {
+ // Next, insert universal regions from args, so we can translate regions that appear
+ // in them but are not subject to member constraints, for instance closure args.
+ let universal_args = infcx.tcx.fold_regions(args, |region, _| {
if let ty::RePlaceholder(..) = region.kind() {
- // Higher kinded regions don't need remapping, they don't refer to anything outside of this the substs.
+ // Higher kinded regions don't need remapping, they don't refer to anything outside of the args.
return region;
}
let vid = self.to_region_vid(region);
to_universal_region(vid, &mut subst_regions)
});
- debug!(?universal_substs);
+ debug!(?universal_args);
debug!(?subst_regions);
// Deduplicate the set of regions while keeping the chosen order.
@@ -139,7 +139,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
debug!(?universal_concrete_type);
let opaque_type_key =
- OpaqueTypeKey { def_id: opaque_type_key.def_id, substs: universal_substs };
+ OpaqueTypeKey { def_id: opaque_type_key.def_id, args: universal_args };
let ty = infcx.infer_opaque_definition_from_instantiation(
opaque_type_key,
universal_concrete_type,
@@ -175,7 +175,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// Map the regions in the type to named regions. This is similar to what
/// `infer_opaque_types` does, but can infer any universal region, not only
- /// ones from the substs for the opaque type. It also doesn't double check
+ /// ones from the args for the opaque type. It also doesn't double check
/// that the regions produced are in fact equal to the named region they are
/// replaced with. This is fine because this function is only to improve the
/// region names in error messages.
@@ -185,6 +185,21 @@ impl<'tcx> RegionInferenceContext<'tcx> {
{
tcx.fold_regions(ty, |region, _| match *region {
ty::ReVar(vid) => {
+ let scc = self.constraint_sccs.scc(vid);
+
+ // Special handling of higher-ranked regions.
+ if self.scc_universes[scc] != ty::UniverseIndex::ROOT {
+ match self.scc_values.placeholders_contained_in(scc).enumerate().last() {
+ // If the region contains a single placeholder then they're equal.
+ Some((0, placeholder)) => {
+ return ty::Region::new_placeholder(tcx, placeholder);
+ }
+
+ // Fallback: this will produce a cryptic error message.
+ _ => return region,
+ }
+ }
+
// Find something that we can name
let upper_bound = self.approx_universal_upper_bound(vid);
let upper_bound = &self.definitions[upper_bound];
@@ -238,7 +253,7 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
/// # Parameters
///
/// - `def_id`, the `impl Trait` type
- /// - `substs`, the substs used to instantiate this opaque type
+ /// - `args`, the args used to instantiate this opaque type
/// - `instantiated_ty`, the inferred type C1 -- fully resolved, lifted version of
/// `opaque_defn.concrete_ty`
#[instrument(level = "debug", skip(self))]
@@ -309,11 +324,11 @@ fn check_opaque_type_well_formed<'tcx>(
})
.build();
let ocx = ObligationCtxt::new(&infcx);
- let identity_substs = InternalSubsts::identity_for_item(tcx, def_id);
+ let identity_args = GenericArgs::identity_for_item(tcx, def_id);
// Require that the hidden type actually fulfills all the bounds of the opaque type, even without
// the bounds that the function supplies.
- let opaque_ty = Ty::new_opaque(tcx, def_id.to_def_id(), identity_substs);
+ let opaque_ty = Ty::new_opaque(tcx, def_id.to_def_id(), identity_args);
ocx.eq(&ObligationCause::misc(definition_span, def_id), param_env, opaque_ty, definition_ty)
.map_err(|err| {
infcx
@@ -339,8 +354,8 @@ fn check_opaque_type_well_formed<'tcx>(
// version.
let errors = ocx.select_all_or_error();
- // This is still required for many(half of the tests in ui/type-alias-impl-trait)
- // tests to pass
+ // This is fishy, but we check it again in `check_opaque_meets_bounds`.
+ // Remove once we can prepopulate with known hidden types.
let _ = infcx.take_opaque_types();
if errors.is_empty() {
@@ -356,40 +371,27 @@ fn check_opaque_type_parameter_valid(
span: Span,
) -> Result<(), ErrorGuaranteed> {
let opaque_ty_hir = tcx.hir().expect_item(opaque_type_key.def_id);
- match opaque_ty_hir.expect_opaque_ty().origin {
- // No need to check return position impl trait (RPIT)
- // because for type and const parameters they are correct
- // by construction: we convert
- //
- // fn foo<P0..Pn>() -> impl Trait
- //
- // into
- //
- // type Foo<P0...Pn>
- // fn foo<P0..Pn>() -> Foo<P0...Pn>.
- //
- // For lifetime parameters we convert
- //
- // fn foo<'l0..'ln>() -> impl Trait<'l0..'lm>
- //
- // into
- //
- // type foo::<'p0..'pn>::Foo<'q0..'qm>
- // fn foo<l0..'ln>() -> foo::<'static..'static>::Foo<'l0..'lm>.
- //
- // which would error here on all of the `'static` args.
- OpaqueTyOrigin::FnReturn(..) | OpaqueTyOrigin::AsyncFn(..) => return Ok(()),
- // Check these
- OpaqueTyOrigin::TyAlias { .. } => {}
- }
+ let is_ty_alias = match opaque_ty_hir.expect_opaque_ty().origin {
+ OpaqueTyOrigin::TyAlias { .. } => true,
+ OpaqueTyOrigin::AsyncFn(..) | OpaqueTyOrigin::FnReturn(..) => false,
+ };
+
let opaque_generics = tcx.generics_of(opaque_type_key.def_id);
let mut seen_params: FxIndexMap<_, Vec<_>> = FxIndexMap::default();
- for (i, arg) in opaque_type_key.substs.iter().enumerate() {
+ for (i, arg) in opaque_type_key.args.iter().enumerate() {
+ if let Err(guar) = arg.error_reported() {
+ return Err(guar);
+ }
+
let arg_is_param = match arg.unpack() {
GenericArgKind::Type(ty) => matches!(ty.kind(), ty::Param(_)),
- GenericArgKind::Lifetime(lt) => {
+ GenericArgKind::Lifetime(lt) if is_ty_alias => {
matches!(*lt, ty::ReEarlyBound(_) | ty::ReFree(_))
}
+ // FIXME(#113916): we can't currently check for unique lifetime params,
+ // see that issue for more. We will also have to ignore unused lifetime
+ // params for RPIT, but that's comparatively trivial ✨
+ GenericArgKind::Lifetime(_) => continue,
GenericArgKind::Const(ct) => matches!(ct.kind(), ty::ConstKind::Param(_)),
};
@@ -419,7 +421,7 @@ fn check_opaque_type_parameter_valid(
return Err(tcx
.sess
.struct_span_err(span, "non-defining opaque type use in defining scope")
- .span_note(spans, format!("{} used multiple times", descr))
+ .span_note(spans, format!("{descr} used multiple times"))
.emit());
}
}
diff --git a/compiler/rustc_borrowck/src/region_infer/values.rs b/compiler/rustc_borrowck/src/region_infer/values.rs
index 9290e7479..d205862cd 100644
--- a/compiler/rustc_borrowck/src/region_infer/values.rs
+++ b/compiler/rustc_borrowck/src/region_infer/values.rs
@@ -470,7 +470,7 @@ fn region_value_str(elements: impl IntoIterator<Item = RegionElement>) -> String
}
push_sep(&mut result);
- result.push_str(&format!("{:?}", fr));
+ result.push_str(&format!("{fr:?}"));
}
RegionElement::PlaceholderRegion(placeholder) => {
@@ -481,7 +481,7 @@ fn region_value_str(elements: impl IntoIterator<Item = RegionElement>) -> String
}
push_sep(&mut result);
- result.push_str(&format!("{:?}", placeholder));
+ result.push_str(&format!("{placeholder:?}"));
}
}
}
@@ -497,7 +497,7 @@ fn region_value_str(elements: impl IntoIterator<Item = RegionElement>) -> String
fn push_location_range(str: &mut String, location1: Location, location2: Location) {
if location1 == location2 {
- str.push_str(&format!("{:?}", location1));
+ str.push_str(&format!("{location1:?}"));
} else {
assert_eq!(location1.block, location2.block);
str.push_str(&format!(
diff --git a/compiler/rustc_borrowck/src/renumber.rs b/compiler/rustc_borrowck/src/renumber.rs
index 4389d2b60..4c69ea843 100644
--- a/compiler/rustc_borrowck/src/renumber.rs
+++ b/compiler/rustc_borrowck/src/renumber.rs
@@ -6,7 +6,7 @@ use rustc_infer::infer::NllRegionVariableOrigin;
use rustc_middle::mir::visit::{MutVisitor, TyContext};
use rustc_middle::mir::Constant;
use rustc_middle::mir::{Body, Location, Promoted};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc_span::{Span, Symbol};
@@ -94,10 +94,10 @@ impl<'a, 'tcx> MutVisitor<'tcx> for RegionRenumberer<'a, 'tcx> {
}
#[instrument(skip(self), level = "debug")]
- fn visit_substs(&mut self, substs: &mut SubstsRef<'tcx>, location: Location) {
- *substs = self.renumber_regions(*substs, || RegionCtxt::Location(location));
+ fn visit_args(&mut self, args: &mut GenericArgsRef<'tcx>, location: Location) {
+ *args = self.renumber_regions(*args, || RegionCtxt::Location(location));
- debug!(?substs);
+ debug!(?args);
}
#[instrument(skip(self), level = "debug")]
diff --git a/compiler/rustc_borrowck/src/session_diagnostics.rs b/compiler/rustc_borrowck/src/session_diagnostics.rs
index fceae5bb3..d1d8cfa74 100644
--- a/compiler/rustc_borrowck/src/session_diagnostics.rs
+++ b/compiler/rustc_borrowck/src/session_diagnostics.rs
@@ -398,7 +398,7 @@ pub(crate) enum CaptureReasonSuggest<'tcx> {
#[suggestion(
borrowck_suggest_create_freash_reborrow,
applicability = "maybe-incorrect",
- code = "as_mut().",
+ code = ".as_mut()",
style = "verbose"
)]
FreshReborrow {
diff --git a/compiler/rustc_borrowck/src/type_check/canonical.rs b/compiler/rustc_borrowck/src/type_check/canonical.rs
index c19fbf20c..16f5e68a0 100644
--- a/compiler/rustc_borrowck/src/type_check/canonical.rs
+++ b/compiler/rustc_borrowck/src/type_check/canonical.rs
@@ -90,11 +90,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
) {
self.prove_predicate(
ty::Binder::dummy(ty::PredicateKind::Clause(ty::ClauseKind::Trait(
- ty::TraitPredicate {
- trait_ref,
- constness: ty::BoundConstness::NotConst,
- polarity: ty::ImplPolarity::Positive,
- },
+ ty::TraitPredicate { trait_ref, polarity: ty::ImplPolarity::Positive },
))),
locations,
category,
diff --git a/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs b/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs
index 71eae7b27..21d8026e1 100644
--- a/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs
+++ b/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs
@@ -5,7 +5,7 @@ use rustc_infer::infer::outlives::obligations::{TypeOutlives, TypeOutlivesDelega
use rustc_infer::infer::region_constraints::{GenericKind, VerifyBound};
use rustc_infer::infer::{self, InferCtxt, SubregionOrigin};
use rustc_middle::mir::{ClosureOutlivesSubject, ClosureRegionRequirements, ConstraintCategory};
-use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::GenericArgKind;
use rustc_middle::ty::{self, TyCtxt};
use rustc_middle::ty::{TypeFoldable, TypeVisitableExt};
use rustc_span::{Span, DUMMY_SP};
@@ -89,20 +89,20 @@ impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
/// Given an instance of the closure type, this method instantiates the "extra" requirements
/// that we computed for the closure. This has the effect of adding new outlives obligations
- /// to existing region variables in `closure_substs`.
+ /// to existing region variables in `closure_args`.
#[instrument(skip(self), level = "debug")]
pub fn apply_closure_requirements(
&mut self,
closure_requirements: &ClosureRegionRequirements<'tcx>,
closure_def_id: DefId,
- closure_substs: ty::SubstsRef<'tcx>,
+ closure_args: ty::GenericArgsRef<'tcx>,
) {
- // Extract the values of the free regions in `closure_substs`
+ // Extract the values of the free regions in `closure_args`
// into a vector. These are the regions that we will be
// relating to one another.
let closure_mapping = &UniversalRegions::closure_mapping(
self.tcx,
- closure_substs,
+ closure_args,
closure_requirements.num_external_vids,
closure_def_id.expect_local(),
);
diff --git a/compiler/rustc_borrowck/src/type_check/liveness/polonius.rs b/compiler/rustc_borrowck/src/type_check/liveness/polonius.rs
index b344ab46a..c621df371 100644
--- a/compiler/rustc_borrowck/src/type_check/liveness/polonius.rs
+++ b/compiler/rustc_borrowck/src/type_check/liveness/polonius.rs
@@ -2,7 +2,7 @@ use crate::def_use::{self, DefUse};
use crate::location::{LocationIndex, LocationTable};
use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::{Body, Local, Location, Place};
-use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::GenericArg;
use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
use super::TypeChecker;
@@ -20,7 +20,7 @@ struct UseFactsExtractor<'me, 'tcx> {
}
// A Visitor to walk through the MIR and extract point-wise facts
-impl UseFactsExtractor<'_, '_> {
+impl<'tcx> UseFactsExtractor<'_, 'tcx> {
fn location_to_index(&self, location: Location) -> LocationIndex {
self.location_table.mid_index(location)
}
@@ -45,7 +45,7 @@ impl UseFactsExtractor<'_, '_> {
self.path_accessed_at_base.push((path, self.location_to_index(location)));
}
- fn place_to_mpi(&self, place: &Place<'_>) -> Option<MovePathIndex> {
+ fn place_to_mpi(&self, place: &Place<'tcx>) -> Option<MovePathIndex> {
match self.move_data.rev_lookup.find(place.as_ref()) {
LookupResult::Exact(mpi) => Some(mpi),
LookupResult::Parent(mmpi) => mmpi,
diff --git a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs
index eb02604b9..5702d39db 100644
--- a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs
+++ b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs
@@ -161,8 +161,12 @@ impl<'me, 'typeck, 'flow, 'tcx> LivenessResults<'me, 'typeck, 'flow, 'tcx> {
}
}
- // Runs dropck for locals whose liveness isn't relevant. This is
- // necessary to eagerly detect unbound recursion during drop glue computation.
+ /// Runs dropck for locals whose liveness isn't relevant. This is
+ /// necessary to eagerly detect unbound recursion during drop glue computation.
+ ///
+ /// These are all the locals which do not potentially reference a region local
+ /// to this body. Locals which only reference free regions are always drop-live
+ /// and can therefore safely be dropped.
fn dropck_boring_locals(&mut self, boring_locals: Vec<Local>) {
for local in boring_locals {
let local_ty = self.cx.body.local_decls[local].ty;
diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs
index a15e1065c..50d875dfa 100644
--- a/compiler/rustc_borrowck/src/type_check/mod.rs
+++ b/compiler/rustc_borrowck/src/type_check/mod.rs
@@ -30,12 +30,12 @@ use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::ObligationCause;
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::cast::CastTy;
-use rustc_middle::ty::subst::{SubstsRef, UserSubsts};
use rustc_middle::ty::visit::TypeVisitableExt;
use rustc_middle::ty::{
self, Binder, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, Dynamic,
OpaqueHiddenType, OpaqueTypeKey, RegionVid, Ty, TyCtxt, UserType, UserTypeAnnotationIndex,
};
+use rustc_middle::ty::{GenericArgsRef, UserArgs};
use rustc_span::def_id::CRATE_DEF_ID;
use rustc_span::symbol::sym;
use rustc_span::{Span, DUMMY_SP};
@@ -389,15 +389,12 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
} else {
self.cx.ascribe_user_type(
constant.literal.ty(),
- UserType::TypeOf(
- uv.def,
- UserSubsts { substs: uv.substs, user_self_ty: None },
- ),
+ UserType::TypeOf(uv.def, UserArgs { args: uv.args, user_self_ty: None }),
locations.span(&self.cx.body),
);
}
} else if let Some(static_def_id) = constant.check_static_ptr(tcx) {
- let unnormalized_ty = tcx.type_of(static_def_id).subst_identity();
+ let unnormalized_ty = tcx.type_of(static_def_id).instantiate_identity();
let normalized_ty = self.cx.normalize(unnormalized_ty, locations);
let literal_ty = constant.literal.ty().builtin_deref(true).unwrap().ty;
@@ -411,19 +408,13 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
}
}
- if let ty::FnDef(def_id, substs) = *constant.literal.ty().kind() {
- // const_trait_impl: use a non-const param env when checking that a FnDef type is well formed.
- // this is because the well-formedness of the function does not need to be proved to have `const`
- // impls for trait bounds.
- let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, substs);
- let prev = self.cx.param_env;
- self.cx.param_env = prev.without_const();
+ if let ty::FnDef(def_id, args) = *constant.literal.ty().kind() {
+ let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, args);
self.cx.normalize_and_prove_instantiated_predicates(
def_id,
instantiated_predicates,
locations,
);
- self.cx.param_env = prev;
}
}
}
@@ -507,14 +498,13 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
/// Checks that the types internal to the `place` match up with
/// what would be expected.
+ #[instrument(level = "debug", skip(self, location), ret)]
fn sanitize_place(
&mut self,
place: &Place<'tcx>,
location: Location,
context: PlaceContext,
) -> PlaceTy<'tcx> {
- debug!("sanitize_place: {:?}", place);
-
let mut place_ty = PlaceTy::from_ty(self.body().local_decls[place.local].ty);
for elem in place.projection.iter() {
@@ -617,7 +607,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
}
}
- #[instrument(skip(self), level = "debug")]
+ #[instrument(skip(self, location), ret, level = "debug")]
fn sanitize_projection(
&mut self,
base: PlaceTy<'tcx>,
@@ -626,7 +616,6 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
location: Location,
context: PlaceContext,
) -> PlaceTy<'tcx> {
- debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, place);
let tcx = self.tcx();
let base_ty = base.ty;
match pi {
@@ -666,7 +655,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
})
}
ProjectionElem::Downcast(maybe_name, index) => match base_ty.kind() {
- ty::Adt(adt_def, _substs) if adt_def.is_enum() => {
+ ty::Adt(adt_def, _args) if adt_def.is_enum() => {
if index.as_usize() >= adt_def.variants().len() {
PlaceTy::from_ty(span_mirbug_and_err!(
self,
@@ -776,16 +765,16 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
) -> Result<Ty<'tcx>, FieldAccessError> {
let tcx = self.tcx();
- let (variant, substs) = match base_ty {
+ let (variant, args) = match base_ty {
PlaceTy { ty, variant_index: Some(variant_index) } => match *ty.kind() {
- ty::Adt(adt_def, substs) => (adt_def.variant(variant_index), substs),
- ty::Generator(def_id, substs, _) => {
- let mut variants = substs.as_generator().state_tys(def_id, tcx);
+ ty::Adt(adt_def, args) => (adt_def.variant(variant_index), args),
+ ty::Generator(def_id, args, _) => {
+ let mut variants = args.as_generator().state_tys(def_id, tcx);
let Some(mut variant) = variants.nth(variant_index.into()) else {
bug!(
"variant_index of generator out of range: {:?}/{:?}",
variant_index,
- substs.as_generator().state_tys(def_id, tcx).count()
+ args.as_generator().state_tys(def_id, tcx).count()
);
};
return match variant.nth(field.index()) {
@@ -796,29 +785,24 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
_ => bug!("can't have downcast of non-adt non-generator type"),
},
PlaceTy { ty, variant_index: None } => match *ty.kind() {
- ty::Adt(adt_def, substs) if !adt_def.is_enum() => {
- (adt_def.variant(FIRST_VARIANT), substs)
+ ty::Adt(adt_def, args) if !adt_def.is_enum() => {
+ (adt_def.variant(FIRST_VARIANT), args)
}
- ty::Closure(_, substs) => {
- return match substs
- .as_closure()
- .tupled_upvars_ty()
- .tuple_fields()
- .get(field.index())
- {
+ ty::Closure(_, args) => {
+ return match args.as_closure().upvar_tys().get(field.index()) {
Some(&ty) => Ok(ty),
None => Err(FieldAccessError::OutOfRange {
- field_count: substs.as_closure().upvar_tys().count(),
+ field_count: args.as_closure().upvar_tys().len(),
}),
};
}
- ty::Generator(_, substs, _) => {
+ ty::Generator(_, args, _) => {
// Only prefix fields (upvars and current state) are
// accessible without a variant index.
- return match substs.as_generator().prefix_tys().nth(field.index()) {
- Some(ty) => Ok(ty),
+ return match args.as_generator().prefix_tys().get(field.index()) {
+ Some(ty) => Ok(*ty),
None => Err(FieldAccessError::OutOfRange {
- field_count: substs.as_generator().prefix_tys().count(),
+ field_count: args.as_generator().prefix_tys().len(),
}),
};
}
@@ -840,7 +824,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
};
if let Some(field) = variant.fields.get(field) {
- Ok(self.cx.normalize(field.ty(tcx, substs), location))
+ Ok(self.cx.normalize(field.ty(tcx, args), location))
} else {
Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() })
}
@@ -1065,7 +1049,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
ocx.infcx.add_item_bounds_for_hidden_type(
opaque_type_key.def_id.to_def_id(),
- opaque_type_key.substs,
+ opaque_type_key.args,
cause,
param_env,
hidden_ty.ty,
@@ -1770,32 +1754,32 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
let tcx = self.tcx();
match *ak {
- AggregateKind::Adt(adt_did, variant_index, substs, _, active_field_index) => {
+ AggregateKind::Adt(adt_did, variant_index, args, _, active_field_index) => {
let def = tcx.adt_def(adt_did);
let variant = &def.variant(variant_index);
let adj_field_index = active_field_index.unwrap_or(field_index);
if let Some(field) = variant.fields.get(adj_field_index) {
- Ok(self.normalize(field.ty(tcx, substs), location))
+ Ok(self.normalize(field.ty(tcx, args), location))
} else {
Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() })
}
}
- AggregateKind::Closure(_, substs) => {
- match substs.as_closure().upvar_tys().nth(field_index.as_usize()) {
- Some(ty) => Ok(ty),
+ AggregateKind::Closure(_, args) => {
+ match args.as_closure().upvar_tys().get(field_index.as_usize()) {
+ Some(ty) => Ok(*ty),
None => Err(FieldAccessError::OutOfRange {
- field_count: substs.as_closure().upvar_tys().count(),
+ field_count: args.as_closure().upvar_tys().len(),
}),
}
}
- AggregateKind::Generator(_, substs, _) => {
+ AggregateKind::Generator(_, args, _) => {
// It doesn't make sense to look at a field beyond the prefix;
// these require a variant index, and are not initialized in
// aggregate rvalues.
- match substs.as_generator().prefix_tys().nth(field_index.as_usize()) {
- Some(ty) => Ok(ty),
+ match args.as_generator().prefix_tys().get(field_index.as_usize()) {
+ Some(ty) => Ok(*ty),
None => Err(FieldAccessError::OutOfRange {
- field_count: substs.as_generator().prefix_tys().count(),
+ field_count: args.as_generator().prefix_tys().len(),
}),
}
}
@@ -1821,8 +1805,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
let def_id = uv.def;
if tcx.def_kind(def_id) == DefKind::InlineConst {
let def_id = def_id.expect_local();
- let predicates =
- self.prove_closure_bounds(tcx, def_id, uv.substs, location);
+ let predicates = self.prove_closure_bounds(tcx, def_id, uv.args, location);
self.normalize_and_prove_instantiated_predicates(
def_id.to_def_id(),
predicates,
@@ -1939,7 +1922,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(unsafety)) => {
let sig = match op.ty(body, tcx).kind() {
- ty::Closure(_, substs) => substs.as_closure().sig(),
+ ty::Closure(_, args) => args.as_closure().sig(),
_ => bug!(),
};
let ty_fn_ptr_from =
@@ -2039,28 +2022,16 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
CastKind::PointerCoercion(PointerCoercion::MutToConstPointer) => {
- let ty::RawPtr(ty::TypeAndMut {
- ty: ty_from,
- mutbl: hir::Mutability::Mut,
- }) = op.ty(body, tcx).kind() else {
- span_mirbug!(
- self,
- rvalue,
- "unexpected base type for cast {:?}",
- ty,
- );
+ let ty::RawPtr(ty::TypeAndMut { ty: ty_from, mutbl: hir::Mutability::Mut }) =
+ op.ty(body, tcx).kind()
+ else {
+ span_mirbug!(self, rvalue, "unexpected base type for cast {:?}", ty,);
return;
};
- let ty::RawPtr(ty::TypeAndMut {
- ty: ty_to,
- mutbl: hir::Mutability::Not,
- }) = ty.kind() else {
- span_mirbug!(
- self,
- rvalue,
- "unexpected target type for cast {:?}",
- ty,
- );
+ let ty::RawPtr(ty::TypeAndMut { ty: ty_to, mutbl: hir::Mutability::Not }) =
+ ty.kind()
+ else {
+ span_mirbug!(self, rvalue, "unexpected target type for cast {:?}", ty,);
return;
};
if let Err(terr) = self.sub_types(
@@ -2603,8 +2574,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
);
let (def_id, instantiated_predicates) = match *aggregate_kind {
- AggregateKind::Adt(adt_did, _, substs, _, _) => {
- (adt_did, tcx.predicates_of(adt_did).instantiate(tcx, substs))
+ AggregateKind::Adt(adt_did, _, args, _, _) => {
+ (adt_did, tcx.predicates_of(adt_did).instantiate(tcx, args))
}
// For closures, we have some **extra requirements** we
@@ -2626,9 +2597,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
// desugaring. A closure gets desugared to a struct, and
// these extra requirements are basically like where
// clauses on the struct.
- AggregateKind::Closure(def_id, substs)
- | AggregateKind::Generator(def_id, substs, _) => {
- (def_id, self.prove_closure_bounds(tcx, def_id.expect_local(), substs, location))
+ AggregateKind::Closure(def_id, args) | AggregateKind::Generator(def_id, args, _) => {
+ (def_id, self.prove_closure_bounds(tcx, def_id.expect_local(), args, location))
}
AggregateKind::Array(_) | AggregateKind::Tuple => {
@@ -2647,7 +2617,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
&mut self,
tcx: TyCtxt<'tcx>,
def_id: LocalDefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
location: Location,
) -> ty::InstantiatedPredicates<'tcx> {
if let Some(closure_requirements) = &tcx.mir_borrowck(def_id).closure_requirements {
@@ -2665,26 +2635,26 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
.apply_closure_requirements(
&closure_requirements,
def_id.to_def_id(),
- substs,
+ args,
);
}
- // Now equate closure substs to regions inherited from `typeck_root_def_id`. Fixes #98589.
+ // Now equate closure args to regions inherited from `typeck_root_def_id`. Fixes #98589.
let typeck_root_def_id = tcx.typeck_root_def_id(self.body.source.def_id());
- let typeck_root_substs = ty::InternalSubsts::identity_for_item(tcx, typeck_root_def_id);
+ let typeck_root_args = ty::GenericArgs::identity_for_item(tcx, typeck_root_def_id);
- let parent_substs = match tcx.def_kind(def_id) {
- DefKind::Closure => substs.as_closure().parent_substs(),
- DefKind::Generator => substs.as_generator().parent_substs(),
- DefKind::InlineConst => substs.as_inline_const().parent_substs(),
+ let parent_args = match tcx.def_kind(def_id) {
+ DefKind::Closure => args.as_closure().parent_args(),
+ DefKind::Generator => args.as_generator().parent_args(),
+ DefKind::InlineConst => args.as_inline_const().parent_args(),
other => bug!("unexpected item {:?}", other),
};
- let parent_substs = tcx.mk_substs(parent_substs);
+ let parent_args = tcx.mk_args(parent_args);
- assert_eq!(typeck_root_substs.len(), parent_substs.len());
- if let Err(_) = self.eq_substs(
- typeck_root_substs,
- parent_substs,
+ assert_eq!(typeck_root_args.len(), parent_args.len());
+ if let Err(_) = self.eq_args(
+ typeck_root_args,
+ parent_args,
location.to_locations(),
ConstraintCategory::BoringNoLocation,
) {
@@ -2692,12 +2662,12 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
self,
def_id,
"could not relate closure to parent {:?} != {:?}",
- typeck_root_substs,
- parent_substs
+ typeck_root_args,
+ parent_args
);
}
- tcx.predicates_of(def_id).instantiate(tcx, substs)
+ tcx.predicates_of(def_id).instantiate(tcx, args)
}
#[instrument(skip(self, body), level = "debug")]
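The field-access hunks above swap an iterator-based API (`nth`/`count`) for a list-based one (`get`/`len`), which is why the match arms now dereference (`Ok(*ty)`). A minimal sketch of the same access pattern on a plain slice; the function name and error value are illustrative only, not part of the compiler:

fn field_ty(upvar_tys: &[u32], field_index: usize) -> Result<u32, usize> {
    match upvar_tys.get(field_index) {
        // `get` yields Option<&T>, so the value is dereferenced before returning.
        Some(&ty) => Ok(ty),
        // On an out-of-range access, report the total field count,
        // analogous to FieldAccessError::OutOfRange in the diff.
        None => Err(upvar_tys.len()),
    }
}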
diff --git a/compiler/rustc_borrowck/src/type_check/relate_tys.rs b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
index 8c4bfb2c6..e0c629562 100644
--- a/compiler/rustc_borrowck/src/type_check/relate_tys.rs
+++ b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
@@ -42,10 +42,10 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
/// Add sufficient constraints to ensure `a == b`. See also [Self::relate_types].
- pub(super) fn eq_substs(
+ pub(super) fn eq_args(
&mut self,
- a: ty::SubstsRef<'tcx>,
- b: ty::SubstsRef<'tcx>,
+ a: ty::GenericArgsRef<'tcx>,
+ b: ty::GenericArgsRef<'tcx>,
locations: Locations,
category: ConstraintCategory<'tcx>,
) -> Result<(), NoSolution> {
diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs
index 7821b82bf..56945f43f 100644
--- a/compiler/rustc_borrowck/src/universal_regions.rs
+++ b/compiler/rustc_borrowck/src/universal_regions.rs
@@ -12,7 +12,6 @@
//! The code in this file doesn't *do anything* with those results; it
//! just returns them for other code to use.
-use either::Either;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::Diagnostic;
use rustc_hir as hir;
@@ -22,8 +21,8 @@ use rustc_hir::BodyOwnerKind;
use rustc_index::IndexVec;
use rustc_infer::infer::NllRegionVariableOrigin;
use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::{self, InlineConstSubsts, InlineConstSubstsParts, RegionVid, Ty, TyCtxt};
-use rustc_middle::ty::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::{self, InlineConstArgs, InlineConstArgsParts, RegionVid, Ty, TyCtxt};
+use rustc_middle::ty::{GenericArgs, GenericArgsRef};
use rustc_span::symbol::{kw, sym};
use rustc_span::Symbol;
use std::iter;
@@ -88,26 +87,26 @@ pub struct UniversalRegions<'tcx> {
#[derive(Copy, Clone, Debug)]
pub enum DefiningTy<'tcx> {
/// The MIR is a closure. The signature is found via
- /// `ClosureSubsts::closure_sig_ty`.
- Closure(DefId, SubstsRef<'tcx>),
+ /// `ClosureArgs::closure_sig_ty`.
+ Closure(DefId, GenericArgsRef<'tcx>),
/// The MIR is a generator. The signature is that generators take
/// no parameters and return the result of
- /// `ClosureSubsts::generator_return_ty`.
- Generator(DefId, SubstsRef<'tcx>, hir::Movability),
+ /// `ClosureArgs::generator_return_ty`.
+ Generator(DefId, GenericArgsRef<'tcx>, hir::Movability),
- /// The MIR is a fn item with the given `DefId` and substs. The signature
+ /// The MIR is a fn item with the given `DefId` and args. The signature
/// of the function can be bound then with the `fn_sig` query.
- FnDef(DefId, SubstsRef<'tcx>),
+ FnDef(DefId, GenericArgsRef<'tcx>),
/// The MIR represents some form of constant. The signature then
/// is that it has no inputs and a single return value, which is
/// the value of the constant.
- Const(DefId, SubstsRef<'tcx>),
+ Const(DefId, GenericArgsRef<'tcx>),
/// The MIR represents an inline const. The signature has no inputs and a
- /// single return value found via `InlineConstSubsts::ty`.
- InlineConst(DefId, SubstsRef<'tcx>),
+ /// single return value found via `InlineConstArgs::ty`.
+ InlineConst(DefId, GenericArgsRef<'tcx>),
}
impl<'tcx> DefiningTy<'tcx> {
@@ -115,14 +114,12 @@ impl<'tcx> DefiningTy<'tcx> {
/// not a closure or generator, there are no upvars, and hence it
/// will be an empty list. The order of types in this list will
/// match up with the upvar order in the HIR, typesystem, and MIR.
- pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ pub fn upvar_tys(self) -> &'tcx ty::List<Ty<'tcx>> {
match self {
- DefiningTy::Closure(_, substs) => Either::Left(substs.as_closure().upvar_tys()),
- DefiningTy::Generator(_, substs, _) => {
- Either::Right(Either::Left(substs.as_generator().upvar_tys()))
- }
+ DefiningTy::Closure(_, args) => args.as_closure().upvar_tys(),
+ DefiningTy::Generator(_, args, _) => args.as_generator().upvar_tys(),
DefiningTy::FnDef(..) | DefiningTy::Const(..) | DefiningTy::InlineConst(..) => {
- Either::Right(Either::Right(iter::empty()))
+ ty::List::empty()
}
}
}
@@ -164,9 +161,9 @@ struct UniversalRegionIndices<'tcx> {
/// used because trait matching and type-checking will feed us
/// region constraints that reference those regions and we need to
/// be able to map them to our internal `RegionVid`. This is
- /// basically equivalent to an `InternalSubsts`, except that it also
+ /// basically equivalent to a `GenericArgs`, except that it also
/// contains an entry for `ReStatic` -- it might be nice to just
- /// use a substs, and then handle `ReStatic` another way.
+ /// use args, and then handle `ReStatic` another way.
indices: FxHashMap<ty::Region<'tcx>, RegionVid>,
/// The vid assigned to `'static`. Used only for diagnostics.
@@ -243,13 +240,13 @@ impl<'tcx> UniversalRegions<'tcx> {
/// `V[1]: V[2]`.
pub fn closure_mapping(
tcx: TyCtxt<'tcx>,
- closure_substs: SubstsRef<'tcx>,
+ closure_args: GenericArgsRef<'tcx>,
expected_num_vars: usize,
closure_def_id: LocalDefId,
) -> IndexVec<RegionVid, ty::Region<'tcx>> {
let mut region_mapping = IndexVec::with_capacity(expected_num_vars);
region_mapping.push(tcx.lifetimes.re_static);
- tcx.for_each_free_region(&closure_substs, |fr| {
+ tcx.for_each_free_region(&closure_args, |fr| {
region_mapping.push(fr);
});
@@ -334,11 +331,11 @@ impl<'tcx> UniversalRegions<'tcx> {
/// state.
pub(crate) fn annotate(&self, tcx: TyCtxt<'tcx>, err: &mut Diagnostic) {
match self.defining_ty {
- DefiningTy::Closure(def_id, substs) => {
+ DefiningTy::Closure(def_id, args) => {
err.note(format!(
- "defining type: {} with closure substs {:#?}",
- tcx.def_path_str_with_substs(def_id, substs),
- &substs[tcx.generics_of(def_id).parent_count..],
+ "defining type: {} with closure args {:#?}",
+ tcx.def_path_str_with_args(def_id, args),
+ &args[tcx.generics_of(def_id).parent_count..],
));
// FIXME: It'd be nice to print the late-bound regions
@@ -350,11 +347,11 @@ impl<'tcx> UniversalRegions<'tcx> {
err.note(format!("late-bound region is {:?}", self.to_region_vid(r)));
});
}
- DefiningTy::Generator(def_id, substs, _) => {
+ DefiningTy::Generator(def_id, args, _) => {
err.note(format!(
- "defining type: {} with generator substs {:#?}",
- tcx.def_path_str_with_substs(def_id, substs),
- &substs[tcx.generics_of(def_id).parent_count..],
+ "defining type: {} with generator args {:#?}",
+ tcx.def_path_str_with_args(def_id, args),
+ &args[tcx.generics_of(def_id).parent_count..],
));
// FIXME: As above, we'd like to print out the region
@@ -364,22 +361,19 @@ impl<'tcx> UniversalRegions<'tcx> {
err.note(format!("late-bound region is {:?}", self.to_region_vid(r)));
});
}
- DefiningTy::FnDef(def_id, substs) => {
- err.note(format!(
- "defining type: {}",
- tcx.def_path_str_with_substs(def_id, substs),
- ));
+ DefiningTy::FnDef(def_id, args) => {
+ err.note(format!("defining type: {}", tcx.def_path_str_with_args(def_id, args),));
}
- DefiningTy::Const(def_id, substs) => {
+ DefiningTy::Const(def_id, args) => {
err.note(format!(
"defining constant type: {}",
- tcx.def_path_str_with_substs(def_id, substs),
+ tcx.def_path_str_with_args(def_id, args),
));
}
- DefiningTy::InlineConst(def_id, substs) => {
+ DefiningTy::InlineConst(def_id, args) => {
err.note(format!(
"defining inline constant type: {}",
- tcx.def_path_str_with_substs(def_id, substs),
+ tcx.def_path_str_with_args(def_id, args),
));
}
}
@@ -501,8 +495,11 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
.as_var();
let region = ty::Region::new_var(self.infcx.tcx, reg_vid);
- let va_list_ty =
- self.infcx.tcx.type_of(va_list_did).subst(self.infcx.tcx, &[region.into()]);
+ let va_list_ty = self
+ .infcx
+ .tcx
+ .type_of(va_list_did)
+ .instantiate(self.infcx.tcx, &[region.into()]);
unnormalized_input_tys = self.infcx.tcx.mk_type_list_from_iter(
unnormalized_input_tys.iter().copied().chain(iter::once(va_list_ty)),
@@ -522,7 +519,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
debug!("build: local regions = {}..{}", first_local_index, num_universals);
let yield_ty = match defining_ty {
- DefiningTy::Generator(_, substs, _) => Some(substs.as_generator().yield_ty()),
+ DefiningTy::Generator(_, args, _) => Some(args.as_generator().yield_ty()),
_ => None,
};
@@ -548,7 +545,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
match tcx.hir().body_owner_kind(self.mir_def) {
BodyOwnerKind::Closure | BodyOwnerKind::Fn => {
- let defining_ty = tcx.type_of(self.mir_def).subst_identity();
+ let defining_ty = tcx.type_of(self.mir_def).instantiate_identity();
debug!("defining_ty (pre-replacement): {:?}", defining_ty);
@@ -556,11 +553,11 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
self.infcx.replace_free_regions_with_nll_infer_vars(FR, defining_ty);
match *defining_ty.kind() {
- ty::Closure(def_id, substs) => DefiningTy::Closure(def_id, substs),
- ty::Generator(def_id, substs, movability) => {
- DefiningTy::Generator(def_id, substs, movability)
+ ty::Closure(def_id, args) => DefiningTy::Closure(def_id, args),
+ ty::Generator(def_id, args, movability) => {
+ DefiningTy::Generator(def_id, args, movability)
}
- ty::FnDef(def_id, substs) => DefiningTy::FnDef(def_id, substs),
+ ty::FnDef(def_id, args) => DefiningTy::FnDef(def_id, args),
_ => span_bug!(
tcx.def_span(self.mir_def),
"expected defining type for `{:?}`: `{:?}`",
@@ -571,11 +568,11 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
}
BodyOwnerKind::Const | BodyOwnerKind::Static(..) => {
- let identity_substs = InternalSubsts::identity_for_item(tcx, typeck_root_def_id);
+ let identity_args = GenericArgs::identity_for_item(tcx, typeck_root_def_id);
if self.mir_def.to_def_id() == typeck_root_def_id {
- let substs =
- self.infcx.replace_free_regions_with_nll_infer_vars(FR, identity_substs);
- DefiningTy::Const(self.mir_def.to_def_id(), substs)
+ let args =
+ self.infcx.replace_free_regions_with_nll_infer_vars(FR, identity_args);
+ DefiningTy::Const(self.mir_def.to_def_id(), args)
} else {
// FIXME this line creates a dependency between borrowck and typeck.
//
@@ -584,18 +581,18 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
// into borrowck, which is ICE #78174.
//
// As a workaround, inline consts have an additional generic param (`ty`
- // below), so that `type_of(inline_const_def_id).substs(substs)` uses the
+ // below), so that `type_of(inline_const_def_id).args(args)` uses the
// proper type with NLL infer vars.
let ty = tcx
.typeck(self.mir_def)
.node_type(tcx.local_def_id_to_hir_id(self.mir_def));
- let substs = InlineConstSubsts::new(
+ let args = InlineConstArgs::new(
tcx,
- InlineConstSubstsParts { parent_substs: identity_substs, ty },
+ InlineConstArgsParts { parent_args: identity_args, ty },
)
- .substs;
- let substs = self.infcx.replace_free_regions_with_nll_infer_vars(FR, substs);
- DefiningTy::InlineConst(self.mir_def.to_def_id(), substs)
+ .args;
+ let args = self.infcx.replace_free_regions_with_nll_infer_vars(FR, args);
+ DefiningTy::InlineConst(self.mir_def.to_def_id(), args)
}
}
}
@@ -612,29 +609,29 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
) -> UniversalRegionIndices<'tcx> {
let tcx = self.infcx.tcx;
let typeck_root_def_id = tcx.typeck_root_def_id(self.mir_def.to_def_id());
- let identity_substs = InternalSubsts::identity_for_item(tcx, typeck_root_def_id);
- let fr_substs = match defining_ty {
- DefiningTy::Closure(_, substs)
- | DefiningTy::Generator(_, substs, _)
- | DefiningTy::InlineConst(_, substs) => {
+ let identity_args = GenericArgs::identity_for_item(tcx, typeck_root_def_id);
+ let fr_args = match defining_ty {
+ DefiningTy::Closure(_, args)
+ | DefiningTy::Generator(_, args, _)
+ | DefiningTy::InlineConst(_, args) => {
// In the case of closures, we rely on the fact that
- // the first N elements in the ClosureSubsts are
+ // the first N elements in the ClosureArgs are
// inherited from the `typeck_root_def_id`.
// Therefore, when we zip together (below) with
- // `identity_substs`, we will get only those regions
+ // `identity_args`, we will get only those regions
// that correspond to early-bound regions declared on
// the `typeck_root_def_id`.
- assert!(substs.len() >= identity_substs.len());
- assert_eq!(substs.regions().count(), identity_substs.regions().count());
- substs
+ assert!(args.len() >= identity_args.len());
+ assert_eq!(args.regions().count(), identity_args.regions().count());
+ args
}
- DefiningTy::FnDef(_, substs) | DefiningTy::Const(_, substs) => substs,
+ DefiningTy::FnDef(_, args) | DefiningTy::Const(_, args) => args,
};
let global_mapping = iter::once((tcx.lifetimes.re_static, fr_static));
let subst_mapping =
- iter::zip(identity_substs.regions(), fr_substs.regions().map(|r| r.as_var()));
+ iter::zip(identity_args.regions(), fr_args.regions().map(|r| r.as_var()));
UniversalRegionIndices { indices: global_mapping.chain(subst_mapping).collect(), fr_static }
}
@@ -646,9 +643,9 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
) -> ty::Binder<'tcx, &'tcx ty::List<Ty<'tcx>>> {
let tcx = self.infcx.tcx;
match defining_ty {
- DefiningTy::Closure(def_id, substs) => {
+ DefiningTy::Closure(def_id, args) => {
assert_eq!(self.mir_def.to_def_id(), def_id);
- let closure_sig = substs.as_closure().sig();
+ let closure_sig = args.as_closure().sig();
let inputs_and_output = closure_sig.inputs_and_output();
let bound_vars = tcx.mk_bound_variable_kinds_from_iter(
inputs_and_output
@@ -661,7 +658,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
kind: ty::BrEnv,
};
let env_region = ty::Region::new_late_bound(tcx, ty::INNERMOST, br);
- let closure_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
+ let closure_ty = tcx.closure_env_ty(def_id, args, env_region).unwrap();
// The "inputs" of the closure in the
// signature appear as a tuple. The MIR side
@@ -681,18 +678,18 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
)
}
- DefiningTy::Generator(def_id, substs, movability) => {
+ DefiningTy::Generator(def_id, args, movability) => {
assert_eq!(self.mir_def.to_def_id(), def_id);
- let resume_ty = substs.as_generator().resume_ty();
- let output = substs.as_generator().return_ty();
- let generator_ty = Ty::new_generator(tcx, def_id, substs, movability);
+ let resume_ty = args.as_generator().resume_ty();
+ let output = args.as_generator().return_ty();
+ let generator_ty = Ty::new_generator(tcx, def_id, args, movability);
let inputs_and_output =
self.infcx.tcx.mk_type_list(&[generator_ty, resume_ty, output]);
ty::Binder::dummy(inputs_and_output)
}
DefiningTy::FnDef(def_id, _) => {
- let sig = tcx.fn_sig(def_id).subst_identity();
+ let sig = tcx.fn_sig(def_id).instantiate_identity();
let sig = indices.fold_to_region_vids(tcx, sig);
sig.inputs_and_output()
}
@@ -701,14 +698,14 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
// For a constant body, there are no inputs, and one
// "output" (the type of the constant).
assert_eq!(self.mir_def.to_def_id(), def_id);
- let ty = tcx.type_of(self.mir_def).subst_identity();
+ let ty = tcx.type_of(self.mir_def).instantiate_identity();
let ty = indices.fold_to_region_vids(tcx, ty);
ty::Binder::dummy(tcx.mk_type_list(&[ty]))
}
- DefiningTy::InlineConst(def_id, substs) => {
+ DefiningTy::InlineConst(def_id, args) => {
assert_eq!(self.mir_def.to_def_id(), def_id);
- let ty = substs.as_inline_const().ty();
+ let ty = args.as_inline_const().ty();
ty::Binder::dummy(tcx.mk_type_list(&[ty]))
}
}
@@ -929,7 +926,9 @@ fn for_each_late_bound_region_in_item<'tcx>(
}
for bound_var in tcx.late_bound_vars(tcx.hir().local_def_id_to_hir_id(mir_def_id)) {
- let ty::BoundVariableKind::Region(bound_region) = bound_var else { continue; };
+ let ty::BoundVariableKind::Region(bound_region) = bound_var else {
+ continue;
+ };
let liberated_region = ty::Region::new_free(tcx, mir_def_id.to_def_id(), bound_region);
f(liberated_region);
}
diff --git a/compiler/rustc_builtin_macros/messages.ftl b/compiler/rustc_builtin_macros/messages.ftl
index 322222ae3..8d8db4c13 100644
--- a/compiler/rustc_builtin_macros/messages.ftl
+++ b/compiler/rustc_builtin_macros/messages.ftl
@@ -109,8 +109,8 @@ builtin_macros_derive_path_args_value = traits in `#[derive(...)]` don't accept
.suggestion = remove the value
builtin_macros_env_not_defined = environment variable `{$var}` not defined at compile time
- .cargo = Cargo sets build script variables at run time. Use `std::env::var("{$var}")` instead
- .other = use `std::env::var("{$var}")` to read the variable at run time
+ .cargo = Cargo sets build script variables at run time. Use `std::env::var({$var_expr})` instead
+ .custom = use `std::env::var({$var_expr})` to read the variable at run time
builtin_macros_env_takes_args = `env!()` takes 1 or 2 arguments
@@ -227,3 +227,5 @@ builtin_macros_unexpected_lit = expected path to a trait, found literal
.label = not a trait
.str_lit = try using `#[derive({$sym})]`
.other = for example, write `#[derive(Debug)]` for `Debug`
+
+builtin_macros_unnameable_test_items = cannot test inner items
diff --git a/compiler/rustc_builtin_macros/src/asm.rs b/compiler/rustc_builtin_macros/src/asm.rs
index 6187e4f51..9e66eaf73 100644
--- a/compiler/rustc_builtin_macros/src/asm.rs
+++ b/compiler/rustc_builtin_macros/src/asm.rs
@@ -157,8 +157,7 @@ pub fn parse_asm_args<'a>(
} else if p.eat_keyword(sym::sym) {
let expr = p.parse_expr()?;
let ast::ExprKind::Path(qself, path) = &expr.kind else {
- let err = diag
- .create_err(errors::AsmSymNoPath { span: expr.span });
+ let err = diag.create_err(errors::AsmSymNoPath { span: expr.span });
return Err(err);
};
let sym = ast::InlineAsmSym {
@@ -402,7 +401,7 @@ fn parse_clobber_abi<'a>(p: &mut Parser<'a>, args: &mut AsmArgs) -> PResult<'a,
// should have errored above during parsing
[] => unreachable!(),
[(abi, _span)] => args.clobber_abis.push((*abi, full_span)),
- [abis @ ..] => {
+ abis => {
for (abi, span) in abis {
args.clobber_abis.push((*abi, *span));
}
@@ -576,7 +575,7 @@ fn expand_preparsed_asm(ecx: &mut ExtCtxt<'_>, args: AsmArgs) -> Option<ast::Inl
|| named_pos.contains_key(&idx)
|| args.reg_args.contains(idx)
{
- let msg = format!("invalid reference to argument at index {}", idx);
+ let msg = format!("invalid reference to argument at index {idx}");
let mut err = ecx.struct_span_err(span, msg);
err.span_label(span, "from here");
@@ -589,9 +588,9 @@ fn expand_preparsed_asm(ecx: &mut ExtCtxt<'_>, args: AsmArgs) -> Option<ast::Inl
""
};
let msg = match positional_args {
- 0 => format!("no {}arguments were given", positional),
- 1 => format!("there is 1 {}argument", positional),
- x => format!("there are {} {}arguments", x, positional),
+ 0 => format!("no {positional}arguments were given"),
+ 1 => format!("there is 1 {positional}argument"),
+ x => format!("there are {x} {positional}arguments"),
};
err.note(msg);
@@ -625,7 +624,7 @@ fn expand_preparsed_asm(ecx: &mut ExtCtxt<'_>, args: AsmArgs) -> Option<ast::Inl
match args.named_args.get(&Symbol::intern(name)) {
Some(&idx) => Some(idx),
None => {
- let msg = format!("there is no argument named `{}`", name);
+ let msg = format!("there is no argument named `{name}`");
let span = arg.position_span;
ecx.struct_span_err(
template_span
@@ -698,8 +697,7 @@ fn expand_preparsed_asm(ecx: &mut ExtCtxt<'_>, args: AsmArgs) -> Option<ast::Inl
err.span_label(sp, msg);
err.help(format!(
"if this argument is intentionally unused, \
- consider using it in an asm comment: `\"/*{} */\"`",
- help_str
+ consider using it in an asm comment: `\"/*{help_str} */\"`"
));
err.emit();
}
@@ -713,8 +711,7 @@ fn expand_preparsed_asm(ecx: &mut ExtCtxt<'_>, args: AsmArgs) -> Option<ast::Inl
}
err.help(format!(
"if these arguments are intentionally unused, \
- consider using them in an asm comment: `\"/*{} */\"`",
- help_str
+ consider using them in an asm comment: `\"/*{help_str} */\"`"
));
err.emit();
}
diff --git a/compiler/rustc_builtin_macros/src/assert.rs b/compiler/rustc_builtin_macros/src/assert.rs
index ab4ea9c8c..9302db104 100644
--- a/compiler/rustc_builtin_macros/src/assert.rs
+++ b/compiler/rustc_builtin_macros/src/assert.rs
@@ -4,8 +4,9 @@ use crate::edition_panic::use_panic_2021;
use crate::errors;
use rustc_ast::ptr::P;
use rustc_ast::token;
+use rustc_ast::token::Delimiter;
use rustc_ast::tokenstream::{DelimSpan, TokenStream};
-use rustc_ast::{DelimArgs, Expr, ExprKind, MacCall, MacDelimiter, Path, PathSegment, UnOp};
+use rustc_ast::{DelimArgs, Expr, ExprKind, MacCall, Path, PathSegment, UnOp};
use rustc_ast_pretty::pprust;
use rustc_errors::PResult;
use rustc_expand::base::{DummyResult, ExtCtxt, MacEager, MacResult};
@@ -58,7 +59,7 @@ pub fn expand_assert<'cx>(
path: panic_path(),
args: P(DelimArgs {
dspan: DelimSpan::from_single(call_site_span),
- delim: MacDelimiter::Parenthesis,
+ delim: Delimiter::Parenthesis,
tokens,
}),
})),
@@ -68,7 +69,7 @@ pub fn expand_assert<'cx>(
// If `generic_assert` is enabled, generates rich captured outputs
//
// FIXME(c410-f3r) See https://github.com/rust-lang/rust/issues/96949
- else if let Some(features) = cx.ecfg.features && features.generic_assert {
+ else if cx.ecfg.features.generic_assert {
context::Context::new(cx, call_site_span).build(cond_expr, panic_path())
}
// If `generic_assert` is not enabled, only outputs a literal "assertion failed: ..."
diff --git a/compiler/rustc_builtin_macros/src/assert/context.rs b/compiler/rustc_builtin_macros/src/assert/context.rs
index 902c1e40c..bda473120 100644
--- a/compiler/rustc_builtin_macros/src/assert/context.rs
+++ b/compiler/rustc_builtin_macros/src/assert/context.rs
@@ -1,9 +1,10 @@
use rustc_ast::{
ptr::P,
token,
+ token::Delimiter,
tokenstream::{DelimSpan, TokenStream, TokenTree},
- BinOpKind, BorrowKind, DelimArgs, Expr, ExprKind, ItemKind, MacCall, MacDelimiter, MethodCall,
- Mutability, Path, PathSegment, Stmt, StructRest, UnOp, UseTree, UseTreeKind, DUMMY_NODE_ID,
+ BinOpKind, BorrowKind, DelimArgs, Expr, ExprKind, ItemKind, MacCall, MethodCall, Mutability,
+ Path, PathSegment, Stmt, StructRest, UnOp, UseTree, UseTreeKind, DUMMY_NODE_ID,
};
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashSet;
@@ -179,7 +180,7 @@ impl<'cx, 'a> Context<'cx, 'a> {
path: panic_path,
args: P(DelimArgs {
dspan: DelimSpan::from_single(self.span),
- delim: MacDelimiter::Parenthesis,
+ delim: Delimiter::Parenthesis,
tokens: initial.into_iter().chain(captures).collect::<TokenStream>(),
}),
})),
@@ -236,7 +237,7 @@ impl<'cx, 'a> Context<'cx, 'a> {
ExprKind::If(local_expr, _, _) => {
self.manage_cond_expr(local_expr);
}
- ExprKind::Index(prefix, suffix) => {
+ ExprKind::Index(prefix, suffix, _) => {
self.manage_cond_expr(prefix);
self.manage_cond_expr(suffix);
}
diff --git a/compiler/rustc_builtin_macros/src/cfg.rs b/compiler/rustc_builtin_macros/src/cfg.rs
index 1397cee7a..31cac5184 100644
--- a/compiler/rustc_builtin_macros/src/cfg.rs
+++ b/compiler/rustc_builtin_macros/src/cfg.rs
@@ -24,7 +24,7 @@ pub fn expand_cfg(
&cfg,
&cx.sess.parse_sess,
cx.current_expansion.lint_node_id,
- cx.ecfg.features,
+ Some(cx.ecfg.features),
);
MacEager::expr(cx.expr_bool(sp, matches_cfg))
}
diff --git a/compiler/rustc_builtin_macros/src/cfg_eval.rs b/compiler/rustc_builtin_macros/src/cfg_eval.rs
index 49401e9ca..f826c6e77 100644
--- a/compiler/rustc_builtin_macros/src/cfg_eval.rs
+++ b/compiler/rustc_builtin_macros/src/cfg_eval.rs
@@ -31,10 +31,11 @@ pub(crate) fn expand(
pub(crate) fn cfg_eval(
sess: &Session,
- features: Option<&Features>,
+ features: &Features,
annotatable: Annotatable,
lint_node_id: NodeId,
) -> Annotatable {
+ let features = Some(features);
CfgEval { cfg: &mut StripUnconfigured { sess, features, config_tokens: true, lint_node_id } }
.configure_annotatable(annotatable)
// Since the item itself has already been configured by the `InvocationCollector`,
diff --git a/compiler/rustc_builtin_macros/src/deriving/clone.rs b/compiler/rustc_builtin_macros/src/deriving/clone.rs
index 9ba98d0a5..b468abe32 100644
--- a/compiler/rustc_builtin_macros/src/deriving/clone.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/clone.rs
@@ -144,7 +144,7 @@ fn cs_clone_simple(
}
_ => cx.span_bug(
trait_span,
- format!("unexpected substructure in simple `derive({})`", name),
+ format!("unexpected substructure in simple `derive({name})`"),
),
}
}
@@ -178,10 +178,10 @@ fn cs_clone(
vdata = &variant.data;
}
EnumTag(..) | AllFieldlessEnum(..) => {
- cx.span_bug(trait_span, format!("enum tags in `derive({})`", name,))
+ cx.span_bug(trait_span, format!("enum tags in `derive({name})`",))
}
StaticEnum(..) | StaticStruct(..) => {
- cx.span_bug(trait_span, format!("associated function in `derive({})`", name))
+ cx.span_bug(trait_span, format!("associated function in `derive({name})`"))
}
}
@@ -193,7 +193,7 @@ fn cs_clone(
let Some(ident) = field.name else {
cx.span_bug(
trait_span,
- format!("unnamed field in normal struct in `derive({})`", name,),
+ format!("unnamed field in normal struct in `derive({name})`",),
);
};
let call = subcall(cx, field);
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
index 4401cf8a9..ea81cee78 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
@@ -61,8 +61,8 @@ pub fn cs_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> Bl
|cx, fold| match fold {
CsFold::Single(field) => {
let [other_expr] = &field.other_selflike_exprs[..] else {
- cx.span_bug(field.span, "not exactly 2 arguments in `derive(Ord)`");
- };
+ cx.span_bug(field.span, "not exactly 2 arguments in `derive(Ord)`");
+ };
let args = thin_vec![field.self_expr.clone(), other_expr.clone()];
cx.expr_call_global(field.span, cmp_path.clone(), args)
}
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
index 54b6cb7d7..a5b3a504e 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
@@ -94,8 +94,8 @@ fn cs_partial_cmp(
|cx, fold| match fold {
CsFold::Single(field) => {
let [other_expr] = &field.other_selflike_exprs[..] else {
- cx.span_bug(field.span, "not exactly 2 arguments in `derive(Ord)`");
- };
+ cx.span_bug(field.span, "not exactly 2 arguments in `derive(Ord)`");
+ };
let args = thin_vec![field.self_expr.clone(), other_expr.clone()];
cx.expr_call_global(field.span, partial_cmp_path.clone(), args)
}
diff --git a/compiler/rustc_builtin_macros/src/deriving/decodable.rs b/compiler/rustc_builtin_macros/src/deriving/decodable.rs
index 3921533c8..bcf11cb4c 100644
--- a/compiler/rustc_builtin_macros/src/deriving/decodable.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/decodable.rs
@@ -204,7 +204,7 @@ where
let fields = fields
.iter()
.enumerate()
- .map(|(i, &span)| getarg(cx, span, Symbol::intern(&format!("_field{}", i)), i))
+ .map(|(i, &span)| getarg(cx, span, Symbol::intern(&format!("_field{i}")), i))
.collect();
cx.expr_call(trait_span, path_expr, fields)
diff --git a/compiler/rustc_builtin_macros/src/deriving/encodable.rs b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
index a3b11309d..2dc20c324 100644
--- a/compiler/rustc_builtin_macros/src/deriving/encodable.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
@@ -173,7 +173,7 @@ fn encodable_substructure(
for (i, &FieldInfo { name, ref self_expr, span, .. }) in fields.iter().enumerate() {
let name = match name {
Some(id) => id.name,
- None => Symbol::intern(&format!("_field{}", i)),
+ None => Symbol::intern(&format!("_field{i}")),
};
let self_ref = cx.expr_addr_of(span, self_expr.clone());
let enc =
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index 4ba09335c..6597ee3cf 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -1134,9 +1134,14 @@ impl<'a> MethodDef<'a> {
trait_: &TraitDef<'b>,
enum_def: &'b EnumDef,
type_ident: Ident,
- selflike_args: ThinVec<P<Expr>>,
+ mut selflike_args: ThinVec<P<Expr>>,
nonselflike_args: &[P<Expr>],
) -> BlockOrExpr {
+ assert!(
+ !selflike_args.is_empty(),
+ "static methods must use `expand_static_enum_method_body`",
+ );
+
let span = trait_.span;
let variants = &enum_def.variants;
@@ -1144,10 +1149,15 @@ impl<'a> MethodDef<'a> {
let unify_fieldless_variants =
self.fieldless_variants_strategy == FieldlessVariantsStrategy::Unify;
- // There is no sensible code to be generated for *any* deriving on a
- // zero-variant enum. So we just generate a failing expression.
+ // For zero-variant enum, this function body is unreachable. Generate
+ // `match *self {}`. This produces machine code identical to `unsafe {
+ // core::intrinsics::unreachable() }` while being safe and stable.
if variants.is_empty() {
- return BlockOrExpr(ThinVec::new(), Some(deriving::call_unreachable(cx, span)));
+ selflike_args.truncate(1);
+ let match_arg = cx.expr_deref(span, selflike_args.pop().unwrap());
+ let match_arms = ThinVec::new();
+ let expr = cx.expr_match(span, match_arg, match_arms);
+ return BlockOrExpr(ThinVec::new(), Some(expr));
}
let prefixes = iter::once("__self".to_string())
@@ -1156,7 +1166,7 @@ impl<'a> MethodDef<'a> {
.iter()
.enumerate()
.skip(1)
- .map(|(arg_count, _selflike_arg)| format!("__arg{}", arg_count)),
+ .map(|(arg_count, _selflike_arg)| format!("__arg{arg_count}")),
)
.collect::<Vec<String>>();
@@ -1171,7 +1181,7 @@ impl<'a> MethodDef<'a> {
let get_tag_pieces = |cx: &ExtCtxt<'_>| {
let tag_idents: Vec<_> = prefixes
.iter()
- .map(|name| Ident::from_str_and_span(&format!("{}_tag", name), span))
+ .map(|name| Ident::from_str_and_span(&format!("{name}_tag"), span))
.collect();
let mut tag_exprs: Vec<_> = tag_idents
@@ -1511,7 +1521,7 @@ impl<'a> TraitDef<'a> {
}
fn mk_pattern_ident(&self, prefix: &str, i: usize) -> Ident {
- Ident::from_str_and_span(&format!("{}_{}", prefix, i), self.span)
+ Ident::from_str_and_span(&format!("{prefix}_{i}"), self.span)
}
fn create_struct_pattern_fields(
@@ -1592,8 +1602,7 @@ impl<'a> TraitDef<'a> {
sp,
ast::CRATE_NODE_ID,
format!(
- "{} slice in a packed struct that derives a built-in trait",
- ty
+ "{ty} slice in a packed struct that derives a built-in trait"
),
rustc_lint_defs::BuiltinLintDiagnostics::ByteSliceInPackedStructWithDerive
);
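
A minimal sketch, outside the patch, of what the zero-variant handling above now expands to; `Void` is a hypothetical uninhabited enum and the impl shown is only the rough shape of the derived method body:

    enum Void {}

    impl Clone for Void {
        fn clone(&self) -> Self {
            // An empty match on an uninhabited scrutinee is exhaustive, so this
            // is safe and stable while compiling to the same machine code as an
            // unreachable intrinsic.
            match *self {}
        }
    }
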
diff --git a/compiler/rustc_builtin_macros/src/edition_panic.rs b/compiler/rustc_builtin_macros/src/edition_panic.rs
index ef0db23ff..1e1dadab4 100644
--- a/compiler/rustc_builtin_macros/src/edition_panic.rs
+++ b/compiler/rustc_builtin_macros/src/edition_panic.rs
@@ -1,4 +1,5 @@
use rustc_ast::ptr::P;
+use rustc_ast::token::Delimiter;
use rustc_ast::tokenstream::{DelimSpan, TokenStream};
use rustc_ast::*;
use rustc_expand::base::*;
@@ -60,7 +61,7 @@ fn expand<'cx>(
},
args: P(DelimArgs {
dspan: DelimSpan::from_single(sp),
- delim: MacDelimiter::Parenthesis,
+ delim: Delimiter::Parenthesis,
tokens: tts,
}),
})),
diff --git a/compiler/rustc_builtin_macros/src/env.rs b/compiler/rustc_builtin_macros/src/env.rs
index 8f64e3328..92da0c069 100644
--- a/compiler/rustc_builtin_macros/src/env.rs
+++ b/compiler/rustc_builtin_macros/src/env.rs
@@ -4,7 +4,7 @@
//
use rustc_ast::tokenstream::TokenStream;
-use rustc_ast::{self as ast, GenericArg};
+use rustc_ast::{self as ast, AstDeref, GenericArg};
use rustc_expand::base::{self, *};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::Span;
@@ -76,22 +76,36 @@ pub fn expand_env<'cx>(
},
};
- let sp = cx.with_def_site_ctxt(sp);
+ let span = cx.with_def_site_ctxt(sp);
let value = env::var(var.as_str()).ok().as_deref().map(Symbol::intern);
cx.sess.parse_sess.env_depinfo.borrow_mut().insert((var, value));
let e = match value {
None => {
- // Use the string literal in the code in the diagnostic to avoid confusing diagnostics,
- // e.g. when the literal contains escape sequences.
- let ast::ExprKind::Lit(ast::token::Lit { kind: ast::token::LitKind::Str, symbol: original_var, ..}) = &var_expr.kind else {
+ let ast::ExprKind::Lit(ast::token::Lit {
+ kind: ast::token::LitKind::Str | ast::token::LitKind::StrRaw(..),
+ symbol,
+ ..
+ }) = &var_expr.kind
+ else {
unreachable!("`expr_to_string` ensures this is a string lit")
};
- cx.emit_err(errors::EnvNotDefined {
- span: sp,
- msg: custom_msg,
- var: *original_var,
- help: custom_msg.is_none().then(|| help_for_missing_env_var(var.as_str())),
- });
+
+ if let Some(msg_from_user) = custom_msg {
+ cx.emit_err(errors::EnvNotDefinedWithUserMessage { span, msg_from_user });
+ } else if is_cargo_env_var(var.as_str()) {
+ cx.emit_err(errors::EnvNotDefined::CargoEnvVar {
+ span,
+ var: *symbol,
+ var_expr: var_expr.ast_deref(),
+ });
+ } else {
+ cx.emit_err(errors::EnvNotDefined::CustomEnvVar {
+ span,
+ var: *symbol,
+ var_expr: var_expr.ast_deref(),
+ });
+ }
+
return DummyResult::any(sp);
}
Some(value) => cx.expr_str(sp, value),
@@ -99,13 +113,9 @@ pub fn expand_env<'cx>(
MacEager::expr(e)
}
-fn help_for_missing_env_var(var: &str) -> errors::EnvNotDefinedHelp {
- if var.starts_with("CARGO_")
+/// Returns `true` if an environment variable from `env!` is one used by Cargo.
+fn is_cargo_env_var(var: &str) -> bool {
+ var.starts_with("CARGO_")
|| var.starts_with("DEP_")
|| matches!(var, "OUT_DIR" | "OPT_LEVEL" | "PROFILE" | "HOST" | "TARGET")
- {
- errors::EnvNotDefinedHelp::CargoVar
- } else {
- errors::EnvNotDefinedHelp::Other
- }
}
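
A hedged example of the diagnostic flow this rewrite enables; `MY_VAR` is illustrative and assumed to be undefined at compile time. Because the help message now interpolates the original literal expression (`var_expr`), escape sequences and raw strings appear exactly as the user wrote them:

    fn main() {
        // error: environment variable `MY_VAR` not defined at compile time
        //  help: use `std::env::var("MY_VAR")` to read the variable at run time
        let _value = env!("MY_VAR");
    }

Variables matched by `is_cargo_env_var` (CARGO_*, DEP_*, OUT_DIR, OPT_LEVEL, PROFILE, HOST, TARGET) get the `.cargo` help text instead.
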
diff --git a/compiler/rustc_builtin_macros/src/errors.rs b/compiler/rustc_builtin_macros/src/errors.rs
index 7b2a375a8..fbf0395bb 100644
--- a/compiler/rustc_builtin_macros/src/errors.rs
+++ b/compiler/rustc_builtin_macros/src/errors.rs
@@ -440,43 +440,43 @@ pub(crate) struct EnvTakesArgs {
pub(crate) span: Span,
}
-//#[derive(Diagnostic)]
-//#[diag(builtin_macros_env_not_defined)]
-pub(crate) struct EnvNotDefined {
+pub(crate) struct EnvNotDefinedWithUserMessage {
pub(crate) span: Span,
- pub(crate) msg: Option<Symbol>,
- pub(crate) var: Symbol,
- pub(crate) help: Option<EnvNotDefinedHelp>,
+ pub(crate) msg_from_user: Symbol,
}
-// Hand-written implementation to support custom user messages
-impl<'a, G: EmissionGuarantee> IntoDiagnostic<'a, G> for EnvNotDefined {
+// Hand-written implementation to support custom user messages.
+impl<'a, G: EmissionGuarantee> IntoDiagnostic<'a, G> for EnvNotDefinedWithUserMessage {
#[track_caller]
fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, G> {
- let mut diag = if let Some(msg) = self.msg {
- #[expect(
- rustc::untranslatable_diagnostic,
- reason = "cannot translate user-provided messages"
- )]
- handler.struct_diagnostic(msg.to_string())
- } else {
- handler.struct_diagnostic(crate::fluent_generated::builtin_macros_env_not_defined)
- };
- diag.set_arg("var", self.var);
+ #[expect(
+ rustc::untranslatable_diagnostic,
+ reason = "cannot translate user-provided messages"
+ )]
+ let mut diag = handler.struct_diagnostic(self.msg_from_user.to_string());
diag.set_span(self.span);
- if let Some(help) = self.help {
- diag.subdiagnostic(help);
- }
diag
}
}
-#[derive(Subdiagnostic)]
-pub(crate) enum EnvNotDefinedHelp {
+#[derive(Diagnostic)]
+pub(crate) enum EnvNotDefined<'a> {
+ #[diag(builtin_macros_env_not_defined)]
#[help(builtin_macros_cargo)]
- CargoVar,
- #[help(builtin_macros_other)]
- Other,
+ CargoEnvVar {
+ #[primary_span]
+ span: Span,
+ var: Symbol,
+ var_expr: &'a rustc_ast::Expr,
+ },
+ #[diag(builtin_macros_env_not_defined)]
+ #[help(builtin_macros_custom)]
+ CustomEnvVar {
+ #[primary_span]
+ span: Span,
+ var: Symbol,
+ var_expr: &'a rustc_ast::Expr,
+ },
}
#[derive(Diagnostic)]
diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs
index 4c878785b..ede95dbf8 100644
--- a/compiler/rustc_builtin_macros/src/format.rs
+++ b/compiler/rustc_builtin_macros/src/format.rs
@@ -1,6 +1,6 @@
use rustc_ast::ptr::P;
-use rustc_ast::token;
use rustc_ast::tokenstream::TokenStream;
+use rustc_ast::{token, StmtKind};
use rustc_ast::{
Expr, ExprKind, FormatAlignment, FormatArgPosition, FormatArgPositionKind, FormatArgs,
FormatArgsPiece, FormatArgument, FormatArgumentKind, FormatArguments, FormatCount,
@@ -163,7 +163,7 @@ fn make_format_args(
let MacroInput { fmtstr: efmt, mut args, is_direct_literal } = input;
- let (fmt_str, fmt_style, fmt_span) = match expr_to_spanned_string(ecx, efmt, msg) {
+ let (fmt_str, fmt_style, fmt_span) = match expr_to_spanned_string(ecx, efmt.clone(), msg) {
Ok(mut fmt) if append_newline => {
fmt.0 = Symbol::intern(&format!("{}\n", fmt.0));
fmt
@@ -171,17 +171,33 @@ fn make_format_args(
Ok(fmt) => fmt,
Err(err) => {
if let Some((mut err, suggested)) = err {
- let sugg_fmt = match args.explicit_args().len() {
- 0 => "{}".to_string(),
- _ => format!("{}{{}}", "{} ".repeat(args.explicit_args().len())),
- };
if !suggested {
- err.span_suggestion(
- unexpanded_fmt_span.shrink_to_lo(),
- "you might be missing a string literal to format with",
- format!("\"{}\", ", sugg_fmt),
- Applicability::MaybeIncorrect,
- );
+ if let ExprKind::Block(block, None) = &efmt.kind
+ && block.stmts.len() == 1
+ && let StmtKind::Expr(expr) = &block.stmts[0].kind
+ && let ExprKind::Path(None, path) = &expr.kind
+ && path.is_potential_trivial_const_arg()
+ {
+ err.multipart_suggestion(
+ "quote your inlined format argument to use as string literal",
+ vec![
+ (unexpanded_fmt_span.shrink_to_hi(), "\"".to_string()),
+ (unexpanded_fmt_span.shrink_to_lo(), "\"".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ let sugg_fmt = match args.explicit_args().len() {
+ 0 => "{}".to_string(),
+ _ => format!("{}{{}}", "{} ".repeat(args.explicit_args().len())),
+ };
+ err.span_suggestion(
+ unexpanded_fmt_span.shrink_to_lo(),
+ "you might be missing a string literal to format with",
+ format!("\"{sugg_fmt}\", "),
+ Applicability::MaybeIncorrect,
+ );
+ }
}
err.emit();
}
@@ -668,7 +684,7 @@ fn report_invalid_references(
let num_args_desc = match args.explicit_args().len() {
0 => "no arguments were given".to_string(),
1 => "there is 1 argument".to_string(),
- n => format!("there are {} arguments", n),
+ n => format!("there are {n} arguments"),
};
let mut e;
@@ -780,7 +796,7 @@ fn report_invalid_references(
if num_placeholders == 1 {
"is 1 argument".to_string()
} else {
- format!("are {} arguments", num_placeholders)
+ format!("are {num_placeholders} arguments")
},
),
);
@@ -811,7 +827,7 @@ fn report_invalid_references(
};
e = ecx.struct_span_err(
span,
- format!("invalid reference to positional {} ({})", arg_list, num_args_desc),
+ format!("invalid reference to positional {arg_list} ({num_args_desc})"),
);
e.note("positional arguments are zero-based");
}
diff --git a/compiler/rustc_builtin_macros/src/format_foreign.rs b/compiler/rustc_builtin_macros/src/format_foreign.rs
index bd5356575..2fc8a0763 100644
--- a/compiler/rustc_builtin_macros/src/format_foreign.rs
+++ b/compiler/rustc_builtin_macros/src/format_foreign.rs
@@ -86,10 +86,7 @@ pub(crate) mod printf {
'-' => c_left = true,
'+' => c_plus = true,
_ => {
- return Err(Some(format!(
- "the flag `{}` is unknown or unsupported",
- c
- )));
+ return Err(Some(format!("the flag `{c}` is unknown or unsupported")));
}
}
}
@@ -268,21 +265,21 @@ pub(crate) mod printf {
impl Num {
fn from_str(s: &str, arg: Option<&str>) -> Self {
if let Some(arg) = arg {
- Num::Arg(arg.parse().unwrap_or_else(|_| panic!("invalid format arg `{:?}`", arg)))
+ Num::Arg(arg.parse().unwrap_or_else(|_| panic!("invalid format arg `{arg:?}`")))
} else if s == "*" {
Num::Next
} else {
- Num::Num(s.parse().unwrap_or_else(|_| panic!("invalid format num `{:?}`", s)))
+ Num::Num(s.parse().unwrap_or_else(|_| panic!("invalid format num `{s:?}`")))
}
}
fn translate(&self, s: &mut String) -> std::fmt::Result {
use std::fmt::Write;
match *self {
- Num::Num(n) => write!(s, "{}", n),
+ Num::Num(n) => write!(s, "{n}"),
Num::Arg(n) => {
let n = n.checked_sub(1).ok_or(std::fmt::Error)?;
- write!(s, "{}$", n)
+ write!(s, "{n}$")
}
Num::Next => write!(s, "*"),
}
@@ -626,8 +623,8 @@ pub mod shell {
impl Substitution<'_> {
pub fn as_str(&self) -> String {
match self {
- Substitution::Ordinal(n, _) => format!("${}", n),
- Substitution::Name(n, _) => format!("${}", n),
+ Substitution::Ordinal(n, _) => format!("${n}"),
+ Substitution::Name(n, _) => format!("${n}"),
Substitution::Escape(_) => "$$".into(),
}
}
diff --git a/compiler/rustc_builtin_macros/src/global_allocator.rs b/compiler/rustc_builtin_macros/src/global_allocator.rs
index 577247193..1bec00add 100644
--- a/compiler/rustc_builtin_macros/src/global_allocator.rs
+++ b/compiler/rustc_builtin_macros/src/global_allocator.rs
@@ -2,7 +2,7 @@ use crate::util::check_builtin_macro_attribute;
use crate::errors;
use rustc_ast::expand::allocator::{
- global_fn_name, AllocatorMethod, AllocatorTy, ALLOCATOR_METHODS,
+ global_fn_name, AllocatorMethod, AllocatorMethodInput, AllocatorTy, ALLOCATOR_METHODS,
};
use rustc_ast::ptr::P;
use rustc_ast::{self as ast, AttrVec, Expr, FnHeader, FnSig, Generics, Param, StmtKind};
@@ -70,19 +70,13 @@ struct AllocFnFactory<'a, 'b> {
impl AllocFnFactory<'_, '_> {
fn allocator_fn(&self, method: &AllocatorMethod) -> Stmt {
let mut abi_args = ThinVec::new();
- let mut i = 0;
- let mut mk = || {
- let name = Ident::from_str_and_span(&format!("arg{}", i), self.span);
- i += 1;
- name
- };
- let args = method.inputs.iter().map(|ty| self.arg_ty(ty, &mut abi_args, &mut mk)).collect();
+ let args = method.inputs.iter().map(|input| self.arg_ty(input, &mut abi_args)).collect();
let result = self.call_allocator(method.name, args);
- let (output_ty, output_expr) = self.ret_ty(&method.output, result);
+ let output_ty = self.ret_ty(&method.output);
let decl = self.cx.fn_decl(abi_args, ast::FnRetTy::Ty(output_ty));
let header = FnHeader { unsafety: Unsafe::Yes(self.span), ..FnHeader::default() };
let sig = FnSig { decl, header, span: self.span };
- let body = Some(self.cx.block_expr(output_expr));
+ let body = Some(self.cx.block_expr(result));
let kind = ItemKind::Fn(Box::new(Fn {
defaultness: ast::Defaultness::Final,
sig,
@@ -113,18 +107,19 @@ impl AllocFnFactory<'_, '_> {
thin_vec![self.cx.attr_word(sym::rustc_std_internal_symbol, self.span)]
}
- fn arg_ty(
- &self,
- ty: &AllocatorTy,
- args: &mut ThinVec<Param>,
- ident: &mut dyn FnMut() -> Ident,
- ) -> P<Expr> {
- match *ty {
+ fn arg_ty(&self, input: &AllocatorMethodInput, args: &mut ThinVec<Param>) -> P<Expr> {
+ match input.ty {
AllocatorTy::Layout => {
+ // If an allocator method is ever introduced having multiple
+ // Layout arguments, these argument names need to be
+ // disambiguated somehow. Currently the generated code would
+ // fail to compile with "identifier is bound more than once in
+ // this parameter list".
+ let size = Ident::from_str_and_span("size", self.span);
+ let align = Ident::from_str_and_span("align", self.span);
+
let usize = self.cx.path_ident(self.span, Ident::new(sym::usize, self.span));
let ty_usize = self.cx.ty_path(usize);
- let size = ident();
- let align = ident();
args.push(self.cx.param(self.span, size, ty_usize.clone()));
args.push(self.cx.param(self.span, align, ty_usize));
@@ -138,14 +133,13 @@ impl AllocFnFactory<'_, '_> {
}
AllocatorTy::Ptr => {
- let ident = ident();
+ let ident = Ident::from_str_and_span(input.name, self.span);
args.push(self.cx.param(self.span, ident, self.ptr_u8()));
- let arg = self.cx.expr_ident(self.span, ident);
- self.cx.expr_cast(self.span, arg, self.ptr_u8())
+ self.cx.expr_ident(self.span, ident)
}
AllocatorTy::Usize => {
- let ident = ident();
+ let ident = Ident::from_str_and_span(input.name, self.span);
args.push(self.cx.param(self.span, ident, self.usize()));
self.cx.expr_ident(self.span, ident)
}
@@ -156,18 +150,11 @@ impl AllocFnFactory<'_, '_> {
}
}
- fn ret_ty(&self, ty: &AllocatorTy, expr: P<Expr>) -> (P<Ty>, P<Expr>) {
+ fn ret_ty(&self, ty: &AllocatorTy) -> P<Ty> {
match *ty {
- AllocatorTy::ResultPtr => {
- // We're creating:
- //
- // #expr as *mut u8
-
- let expr = self.cx.expr_cast(self.span, expr, self.ptr_u8());
- (self.ptr_u8(), expr)
- }
+ AllocatorTy::ResultPtr => self.ptr_u8(),
- AllocatorTy::Unit => (self.cx.ty(self.span, TyKind::Tup(ThinVec::new())), expr),
+ AllocatorTy::Unit => self.cx.ty(self.span, TyKind::Tup(ThinVec::new())),
AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
panic!("can't convert `AllocatorTy` to an output")
diff --git a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
index b35a2e2a2..dae1bc5bf 100644
--- a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
+++ b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
@@ -5,6 +5,7 @@ use rustc_ast::{self as ast, attr, NodeId};
use rustc_ast_pretty::pprust;
use rustc_expand::base::{parse_macro_name_and_helper_attrs, ExtCtxt, ResolverExpand};
use rustc_expand::expand::{AstFragment, ExpansionConfig};
+use rustc_feature::Features;
use rustc_session::Session;
use rustc_span::hygiene::AstPass;
use rustc_span::source_map::SourceMap;
@@ -46,13 +47,14 @@ struct CollectProcMacros<'a> {
pub fn inject(
krate: &mut ast::Crate,
sess: &Session,
+ features: &Features,
resolver: &mut dyn ResolverExpand,
is_proc_macro_crate: bool,
has_proc_macro_decls: bool,
is_test_crate: bool,
handler: &rustc_errors::Handler,
) {
- let ecfg = ExpansionConfig::default("proc_macro".to_string());
+ let ecfg = ExpansionConfig::default("proc_macro".to_string(), features);
let mut cx = ExtCtxt::new(sess, ecfg, resolver, None);
let mut collect = CollectProcMacros {
@@ -89,7 +91,9 @@ impl<'a> CollectProcMacros<'a> {
}
fn collect_custom_derive(&mut self, item: &'a ast::Item, attr: &'a ast::Attribute) {
- let Some((trait_name, proc_attrs)) = parse_macro_name_and_helper_attrs(self.handler, attr, "derive") else {
+ let Some((trait_name, proc_attrs)) =
+ parse_macro_name_and_helper_attrs(self.handler, attr, "derive")
+ else {
return;
};
@@ -177,8 +181,7 @@ impl<'a> Visitor<'a> for CollectProcMacros<'a> {
== prev_item.path.segments[0].ident.name
{
format!(
- "only one `#[{}]` attribute is allowed on any given function",
- path_str,
+ "only one `#[{path_str}]` attribute is allowed on any given function",
)
} else {
format!(
diff --git a/compiler/rustc_builtin_macros/src/source_util.rs b/compiler/rustc_builtin_macros/src/source_util.rs
index e613b904d..433da7423 100644
--- a/compiler/rustc_builtin_macros/src/source_util.rs
+++ b/compiler/rustc_builtin_macros/src/source_util.rs
@@ -149,7 +149,7 @@ pub fn expand_include<'cx>(
Ok(None) => {
if self.p.token != token::Eof {
let token = pprust::token_to_string(&self.p.token);
- let msg = format!("expected item, found `{}`", token);
+ let msg = format!("expected item, found `{token}`");
self.p.struct_span_err(self.p.token.span, msg).emit();
}
diff --git a/compiler/rustc_builtin_macros/src/standard_library_imports.rs b/compiler/rustc_builtin_macros/src/standard_library_imports.rs
index 6493c6f13..3ee3112f0 100644
--- a/compiler/rustc_builtin_macros/src/standard_library_imports.rs
+++ b/compiler/rustc_builtin_macros/src/standard_library_imports.rs
@@ -1,6 +1,7 @@
use rustc_ast::{self as ast, attr};
use rustc_expand::base::{ExtCtxt, ResolverExpand};
use rustc_expand::expand::ExpansionConfig;
+use rustc_feature::Features;
use rustc_session::Session;
use rustc_span::edition::Edition::*;
use rustc_span::hygiene::AstPass;
@@ -13,6 +14,7 @@ pub fn inject(
pre_configured_attrs: &[ast::Attribute],
resolver: &mut dyn ResolverExpand,
sess: &Session,
+ features: &Features,
) -> usize {
let orig_num_items = krate.items.len();
let edition = sess.parse_sess.edition;
@@ -39,25 +41,34 @@ pub fn inject(
let span = DUMMY_SP.with_def_site_ctxt(expn_id.to_expn_id());
let call_site = DUMMY_SP.with_call_site_ctxt(expn_id.to_expn_id());
- let ecfg = ExpansionConfig::default("std_lib_injection".to_string());
+ let ecfg = ExpansionConfig::default("std_lib_injection".to_string(), features);
let cx = ExtCtxt::new(sess, ecfg, resolver, None);
// .rev() to preserve ordering above in combination with insert(0, ...)
for &name in names.iter().rev() {
- let ident = if edition >= Edition2018 {
- Ident::new(name, span)
+ let ident_span = if edition >= Edition2018 { span } else { call_site };
+ let item = if name == sym::compiler_builtins {
+ // compiler_builtins is a private implementation detail. We only
+ // need to insert it into the crate graph for linking and should not
+ // expose any of its public API.
+ //
+ // FIXME(#113634) We should inject this during post-processing like
+ // we do for the panic runtime, profiler runtime, etc.
+ cx.item(
+ span,
+ Ident::new(kw::Underscore, ident_span),
+ thin_vec![],
+ ast::ItemKind::ExternCrate(Some(name)),
+ )
} else {
- Ident::new(name, call_site)
- };
- krate.items.insert(
- 0,
cx.item(
span,
- ident,
+ Ident::new(name, ident_span),
thin_vec![cx.attr_word(sym::macro_use, span)],
ast::ItemKind::ExternCrate(None),
- ),
- );
+ )
+ };
+ krate.items.insert(0, item);
}
// The crates have been injected, the assumption is that the first one is
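
A rough sketch, not part of the patch, of the shape of the injected items after this change; the accompanying crate name (`core`) is illustrative:

    // compiler_builtins: linked for its symbols only, so it is renamed to `_`
    // and gets no #[macro_use], keeping its public API out of scope.
    extern crate compiler_builtins as _;

    // Other injected standard-library crates keep the previous form.
    #[macro_use]
    extern crate core;
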
diff --git a/compiler/rustc_builtin_macros/src/test.rs b/compiler/rustc_builtin_macros/src/test.rs
index 6bc4f6fc1..1580a6f6d 100644
--- a/compiler/rustc_builtin_macros/src/test.rs
+++ b/compiler/rustc_builtin_macros/src/test.rs
@@ -255,6 +255,7 @@ pub fn expand_test_or_bench(
ast::ItemKind::Const(
ast::ConstItem {
defaultness: ast::Defaultness::Final,
+ generics: ast::Generics::default(),
ty: cx.ty(sp, ast::TyKind::Path(None, test_path("TestDescAndFn"))),
// test::TestDescAndFn {
expr: Some(
diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs
index 81b618548..d8846a9f0 100644
--- a/compiler/rustc_builtin_macros/src/test_harness.rs
+++ b/compiler/rustc_builtin_macros/src/test_harness.rs
@@ -4,10 +4,12 @@ use rustc_ast as ast;
use rustc_ast::entry::EntryPointType;
use rustc_ast::mut_visit::{ExpectOne, *};
use rustc_ast::ptr::P;
+use rustc_ast::visit::{walk_item, Visitor};
use rustc_ast::{attr, ModKind};
use rustc_expand::base::{ExtCtxt, ResolverExpand};
use rustc_expand::expand::{AstFragment, ExpansionConfig};
use rustc_feature::Features;
+use rustc_session::lint::builtin::UNNAMEABLE_TEST_ITEMS;
use rustc_session::Session;
use rustc_span::hygiene::{AstPass, SyntaxContext, Transparency};
use rustc_span::symbol::{sym, Ident, Symbol};
@@ -39,7 +41,12 @@ struct TestCtxt<'a> {
/// Traverse the crate, collecting all the test functions, eliding any
/// existing main functions, and synthesizing a main test harness
-pub fn inject(krate: &mut ast::Crate, sess: &Session, resolver: &mut dyn ResolverExpand) {
+pub fn inject(
+ krate: &mut ast::Crate,
+ sess: &Session,
+ features: &Features,
+ resolver: &mut dyn ResolverExpand,
+) {
let span_diagnostic = sess.diagnostic();
let panic_strategy = sess.panic_strategy();
let platform_panic_strategy = sess.target.panic_strategy;
@@ -74,7 +81,7 @@ pub fn inject(krate: &mut ast::Crate, sess: &Session, resolver: &mut dyn Resolve
resolver,
reexport_test_harness_main,
krate,
- &sess.features_untracked(),
+ features,
panic_strategy,
test_runner,
)
@@ -137,11 +144,31 @@ impl<'a> MutVisitor for TestHarnessGenerator<'a> {
let prev_tests = mem::take(&mut self.tests);
noop_visit_item_kind(&mut item.kind, self);
self.add_test_cases(item.id, span, prev_tests);
+ } else {
+ // But in those cases, we emit a lint to warn the user of these missing tests.
+ walk_item(&mut InnerItemLinter { sess: self.cx.ext_cx.sess }, &item);
}
smallvec![P(item)]
}
}
+struct InnerItemLinter<'a> {
+ sess: &'a Session,
+}
+
+impl<'a> Visitor<'a> for InnerItemLinter<'_> {
+ fn visit_item(&mut self, i: &'a ast::Item) {
+ if let Some(attr) = attr::find_by_name(&i.attrs, sym::rustc_test_marker) {
+ self.sess.parse_sess.buffer_lint(
+ UNNAMEABLE_TEST_ITEMS,
+ attr.span,
+ i.id,
+ crate::fluent_generated::builtin_macros_unnameable_test_items,
+ );
+ }
+ }
+}
+
// Beware, this is duplicated in librustc_passes/entry.rs (with
// `rustc_hir::Item`), so make sure to keep them in sync.
fn entry_point_type(item: &ast::Item, depth: usize) -> EntryPointType {
@@ -221,9 +248,7 @@ fn generate_test_harness(
panic_strategy: PanicStrategy,
test_runner: Option<ast::Path>,
) {
- let mut econfig = ExpansionConfig::default("test".to_string());
- econfig.features = Some(features);
-
+ let econfig = ExpansionConfig::default("test".to_string(), features);
let ext_cx = ExtCtxt::new(sess, econfig, resolver, None);
let expn_id = ext_cx.resolver.expansion_for_ast_pass(
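
A hedged sketch of the situation the new `InnerItemLinter` is aimed at; the function and test names are made up. A `#[test]` nested inside a function body expands and carries `rustc_test_marker`, but the harness cannot name it, so instead of dropping it silently the `UNNAMEABLE_TEST_ITEMS` lint now fires on it:

    fn helper() {
        #[test] // warning: cannot test inner items
        fn nested_test() {
            assert_eq!(2 + 2, 4);
        }
    }
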
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/audit.yml b/compiler/rustc_codegen_cranelift/.github/workflows/audit.yml
new file mode 100644
index 000000000..3efdec415
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/audit.yml
@@ -0,0 +1,19 @@
+name: Security audit
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: '0 10 * * 1' # every monday at 10:00 UTC
+permissions:
+ issues: write
+ checks: write
+jobs:
+ audit:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - run: |
+ sed -i 's/components.*/components = []/' rust-toolchain
+ echo 'profile = "minimal"' >> rust-toolchain
+ - uses: rustsec/audit-check@v1.4.1
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/main.yml b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
index 8e6c1e8ad..652d6eca3 100644
--- a/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
@@ -12,9 +12,11 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Install rustfmt
+ - name: Avoid installing rustc-dev
run: |
- rustup component add rustfmt
+ sed -i 's/components.*/components = ["rustfmt"]/' rust-toolchain
+ echo 'profile = "minimal"' >> rust-toolchain
+ rustfmt -v
- name: Rustfmt
run: |
@@ -127,7 +129,7 @@ jobs:
- uses: actions/checkout@v3
- name: Prepare dependencies
- run: ./y.rs prepare
+ run: ./y.sh prepare
- name: Disable JIT tests
run: |
@@ -136,7 +138,7 @@ jobs:
- name: Test
env:
TARGET_TRIPLE: x86_64-unknown-linux-gnu
- run: ./y.rs test --use-backend llvm
+ run: ./y.sh test --use-backend llvm
bench:
runs-on: ubuntu-latest
diff --git a/compiler/rustc_codegen_cranelift/Cargo.lock b/compiler/rustc_codegen_cranelift/Cargo.lock
index 904233d42..af8e43da4 100644
--- a/compiler/rustc_codegen_cranelift/Cargo.lock
+++ b/compiler/rustc_codegen_cranelift/Cargo.lock
@@ -51,18 +51,18 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cranelift-bforest"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b6160c0a96253993b79fb7e0983534a4515ecf666120ddf8f92068114997ebc"
+checksum = "ec27af72e56235eb326b5bf2de4e70ab7c5ac1fb683a1829595badaf821607fd"
dependencies = [
"cranelift-entity",
]
[[package]]
name = "cranelift-codegen"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b38da5f63562e42f3c929d7c76871098e5ad12c8ab44b0659ffc529f22a5b3a"
+checksum = "2231e12925e6c5f4bc9c95b62a798eea6ed669a95bc3e00f8b2adb3b7b9b7a80"
dependencies = [
"bumpalo",
"cranelift-bforest",
@@ -81,39 +81,39 @@ dependencies = [
[[package]]
name = "cranelift-codegen-meta"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "011371e213e163b55dd9e8404b3f2d9fa52cd14dc2f3dc5b83e61ffceff126db"
+checksum = "413b00b8dfb3aab85674a534677e7ca08854b503f164a70ec0634fce80996e2c"
dependencies = [
"cranelift-codegen-shared",
]
[[package]]
name = "cranelift-codegen-shared"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bf97dde7f5ad571161cdd203a2c9c88682ef669830aea3c14ea5d164ef8bb43"
+checksum = "cd0feb9ecc8193ef5cb04f494c5bd835e5bfec4bde726e7ac0444fc9dd76229e"
[[package]]
name = "cranelift-control"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd9a9254aee733b0f2b68e5eaaf0337ad53cb23252a056c10a35370551be8d40"
+checksum = "72eedd2afcf5fee1e042eaaf18d3750e48ad0eca364a9f5971ecfdd5ef85bf71"
dependencies = [
"arbitrary",
]
[[package]]
name = "cranelift-entity"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf39a33ee39479d1337cd9333f3c09786c5a0ca1ec509edcaf9d1346d5de0e5"
+checksum = "7af19157be42671073cf8c2a52d6a4ae1e7b11f1dcb4131fede356d9f91c29dd"
[[package]]
name = "cranelift-frontend"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "65e260b92a193a0a2dccc3938f133d9532e7dcfe8d03e36bf8b7d3518c1c1793"
+checksum = "c2dc7636c5fad156be7d9ae691cd1aaecd97326caf2ab534ba168056d56aa76c"
dependencies = [
"cranelift-codegen",
"log",
@@ -123,15 +123,15 @@ dependencies = [
[[package]]
name = "cranelift-isle"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9446c8e1aadfcdacee1a49592bc2c25d1d9bf5484782c163e7f5485c92cd3c1c"
+checksum = "c1111aea4fb6fade5779903f184249a3fc685a799fe4ec59126f9af59c7c2a74"
[[package]]
name = "cranelift-jit"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "689a6df165d0f860c1e1a3d53c28944e2743c3e9ee4c678cf190fe60ad7a6ef5"
+checksum = "dadf88076317f6286ec77ebbe65978734fb43b6befdc96f52ff4c4c511841644"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -149,9 +149,9 @@ dependencies = [
[[package]]
name = "cranelift-module"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b1402d6ff1695b429536b2eaa126db560fc94c375ed0e9cfb15051fc07427f7"
+checksum = "c6bae8a82dbf82241b1083e57e06870d2c2bdc9852727be99d58477513816953"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -160,9 +160,9 @@ dependencies = [
[[package]]
name = "cranelift-native"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eac916f3c5aff4b817e42fc2e682292b931495b3fe2603d5e3c3cf602d74e344"
+checksum = "1ecfc01a634448468a698beac433d98040033046678a0eed3ca39a3a9f63ae86"
dependencies = [
"cranelift-codegen",
"libc",
@@ -171,9 +171,9 @@ dependencies = [
[[package]]
name = "cranelift-object"
-version = "0.96.1"
+version = "0.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23860f4cd064017f2108e6bc5d25660a77cd6eea77f1ac0756870a00abb12e93"
+checksum = "0ee14a7276999f0dcaae2de84043e2c2de50820fb89b3db56fab586a4ad26734"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -194,6 +194,12 @@ dependencies = [
]
[[package]]
+name = "equivalent"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1"
+
+[[package]]
name = "fallible-iterator"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -206,7 +212,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
dependencies = [
"fallible-iterator",
- "indexmap",
+ "indexmap 1.9.3",
"stable_deref_trait",
]
@@ -226,6 +232,12 @@ dependencies = [
]
[[package]]
+name = "hashbrown"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
+
+[[package]]
name = "indexmap"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -236,6 +248,16 @@ dependencies = [
]
[[package]]
+name = "indexmap"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
+dependencies = [
+ "equivalent",
+ "hashbrown 0.14.0",
+]
+
+[[package]]
name = "libc"
version = "0.2.138"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -283,7 +305,7 @@ checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385"
dependencies = [
"crc32fast",
"hashbrown 0.13.2",
- "indexmap",
+ "indexmap 1.9.3",
"memchr",
]
@@ -295,9 +317,9 @@ checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
[[package]]
name = "regalloc2"
-version = "0.8.1"
+version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4a52e724646c6c0800fc456ec43b4165d2f91fba88ceaca06d9e0b400023478"
+checksum = "5b4dcbd3a2ae7fb94b5813fa0e957c6ab51bf5d0a8ee1b69e0c2d0f1e6eb8485"
dependencies = [
"hashbrown 0.13.2",
"log",
@@ -335,7 +357,7 @@ dependencies = [
"cranelift-native",
"cranelift-object",
"gimli",
- "indexmap",
+ "indexmap 2.0.0",
"libloading",
"object",
"smallvec",
@@ -374,9 +396,9 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "wasmtime-jit-icache-coherence"
-version = "9.0.1"
+version = "11.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d90933b781e1cef7656baed671c7a90bdba0c1c694e04fdd4124419308f5cbb"
+checksum = "e34eb67f0829a5614ec54716c8e0c9fe68fab7b9df3686c85f719c9d247f7169"
dependencies = [
"cfg-if",
"libc",
diff --git a/compiler/rustc_codegen_cranelift/Cargo.toml b/compiler/rustc_codegen_cranelift/Cargo.toml
index 1c1f2d857..8ded81d73 100644
--- a/compiler/rustc_codegen_cranelift/Cargo.toml
+++ b/compiler/rustc_codegen_cranelift/Cargo.toml
@@ -8,17 +8,17 @@ crate-type = ["dylib"]
[dependencies]
# These have to be in sync with each other
-cranelift-codegen = { version = "0.96.1", features = ["unwind", "all-arch"] }
-cranelift-frontend = { version = "0.96.1" }
-cranelift-module = { version = "0.96.1" }
-cranelift-native = { version = "0.96.1" }
-cranelift-jit = { version = "0.96.1", optional = true }
-cranelift-object = { version = "0.96.1" }
+cranelift-codegen = { version = "0.98", features = ["unwind", "all-arch"] }
+cranelift-frontend = { version = "0.98" }
+cranelift-module = { version = "0.98" }
+cranelift-native = { version = "0.98" }
+cranelift-jit = { version = "0.98", optional = true }
+cranelift-object = { version = "0.98" }
target-lexicon = "0.12.0"
gimli = { version = "0.27.2", default-features = false, features = ["write"]}
object = { version = "0.30.3", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
-indexmap = "1.9.3"
+indexmap = "2.0.0"
libloading = { version = "0.7.3", optional = true }
smallvec = "1.8.1"
diff --git a/compiler/rustc_codegen_cranelift/Readme.md b/compiler/rustc_codegen_cranelift/Readme.md
index 9469feea0..62eaef359 100644
--- a/compiler/rustc_codegen_cranelift/Readme.md
+++ b/compiler/rustc_codegen_cranelift/Readme.md
@@ -65,12 +65,12 @@ to `./build/host/stage2/bin/`. Note that you would need to do this every time yo
5. Copy cargo from another toolchain: `cp $(rustup which cargo) .build/<your hostname triple>/stage2/bin/cargo`
* Another option is to build it at step 3 and copy with other executables at step 4.
6. Link your new `rustc` to toolchain: `rustup toolchain link stage2 ./build/host/stage2/`.
-7. (Windows only) compile y.rs: `rustc +stage2 -O y.rs`.
-8. You need to prefix every `./y.rs` (or `y` if you built `y.rs`) command by `rustup run stage2` to make cg_clif use your local changes in rustc.
+7. (Windows only) compile the build system: `rustc +stage2 -O build_system/main.rs -o y.exe`.
+8. You need to prefix every `./y.sh` (or `y` if you built `build_system/main.rs` as `y`) command by `rustup run stage2` to make cg_clif use your local changes in rustc.
- * `rustup run stage2 ./y.rs prepare`
- * `rustup run stage2 ./y.rs build`
- * (Optional) run tests: `rustup run stage2 ./y.rs test`
+ * `rustup run stage2 ./y.sh prepare`
+ * `rustup run stage2 ./y.sh build`
+ * (Optional) run tests: `rustup run stage2 ./y.sh test`
9. Now you can use your cg_clif build to compile other Rust programs, e.g. you can open any Rust crate and run commands like `$RustCheckoutDir/compiler/rustc_codegen_cranelift/dist/cargo-clif build --release`.
## Configuration
diff --git a/compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs b/compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs
index 29c127bf5..2e7ba1b20 100644
--- a/compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs
@@ -1,8 +1,8 @@
-use super::build_sysroot;
-use super::path::Dirs;
-use super::prepare::GitRepo;
-use super::utils::{spawn_and_wait, CargoProject, Compiler};
-use super::{CodegenBackend, SysrootKind};
+use crate::build_sysroot;
+use crate::path::Dirs;
+use crate::prepare::GitRepo;
+use crate::utils::{spawn_and_wait, CargoProject, Compiler};
+use crate::{CodegenBackend, SysrootKind};
static ABI_CAFE_REPO: GitRepo = GitRepo::github(
"Gankra",
diff --git a/compiler/rustc_codegen_cranelift/build_system/bench.rs b/compiler/rustc_codegen_cranelift/build_system/bench.rs
index 2bb118000..6c64faaa2 100644
--- a/compiler/rustc_codegen_cranelift/build_system/bench.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/bench.rs
@@ -1,10 +1,11 @@
use std::env;
+use std::io::Write;
use std::path::Path;
-use super::path::{Dirs, RelPath};
-use super::prepare::GitRepo;
-use super::rustc_info::get_file_name;
-use super::utils::{hyperfine_command, spawn_and_wait, Compiler};
+use crate::path::{Dirs, RelPath};
+use crate::prepare::GitRepo;
+use crate::rustc_info::get_file_name;
+use crate::utils::{hyperfine_command, spawn_and_wait, Compiler};
static SIMPLE_RAYTRACER_REPO: GitRepo = GitRepo::github(
"ebobby",
@@ -30,6 +31,12 @@ fn benchmark_simple_raytracer(dirs: &Dirs, bootstrap_host_compiler: &Compiler) {
let bench_runs = env::var("BENCH_RUNS").unwrap_or_else(|_| "10".to_string()).parse().unwrap();
+ let mut gha_step_summary = if let Ok(file) = std::env::var("GITHUB_STEP_SUMMARY") {
+ Some(std::fs::OpenOptions::new().append(true).open(file).unwrap())
+ } else {
+ None
+ };
+
eprintln!("[BENCH COMPILE] ebobby/simple-raytracer");
let cargo_clif = RelPath::DIST
.to_path(dirs)
@@ -60,36 +67,64 @@ fn benchmark_simple_raytracer(dirs: &Dirs, bootstrap_host_compiler: &Compiler) {
target_dir = target_dir.display(),
);
+ let bench_compile_markdown = RelPath::DIST.to_path(dirs).join("bench_compile.md");
+
let bench_compile = hyperfine_command(
1,
bench_runs,
Some(&clean_cmd),
- &[&llvm_build_cmd, &clif_build_cmd, &clif_build_opt_cmd],
+ &[
+ ("cargo build", &llvm_build_cmd),
+ ("cargo-clif build", &clif_build_cmd),
+ ("cargo-clif build --release", &clif_build_opt_cmd),
+ ],
+ &bench_compile_markdown,
);
spawn_and_wait(bench_compile);
+ if let Some(gha_step_summary) = gha_step_summary.as_mut() {
+ gha_step_summary.write_all(b"## Compile ebobby/simple-raytracer\n\n").unwrap();
+ gha_step_summary.write_all(&std::fs::read(bench_compile_markdown).unwrap()).unwrap();
+ gha_step_summary.write_all(b"\n").unwrap();
+ }
+
eprintln!("[BENCH RUN] ebobby/simple-raytracer");
+ let bench_run_markdown = RelPath::DIST.to_path(dirs).join("bench_run.md");
+
+ let raytracer_cg_llvm = Path::new(".").join(get_file_name(
+ &bootstrap_host_compiler.rustc,
+ "raytracer_cg_llvm",
+ "bin",
+ ));
+ let raytracer_cg_clif = Path::new(".").join(get_file_name(
+ &bootstrap_host_compiler.rustc,
+ "raytracer_cg_clif",
+ "bin",
+ ));
+ let raytracer_cg_clif_opt = Path::new(".").join(get_file_name(
+ &bootstrap_host_compiler.rustc,
+ "raytracer_cg_clif_opt",
+ "bin",
+ ));
let mut bench_run = hyperfine_command(
0,
bench_runs,
None,
&[
- Path::new(".")
- .join(get_file_name(&bootstrap_host_compiler.rustc, "raytracer_cg_llvm", "bin"))
- .to_str()
- .unwrap(),
- Path::new(".")
- .join(get_file_name(&bootstrap_host_compiler.rustc, "raytracer_cg_clif", "bin"))
- .to_str()
- .unwrap(),
- Path::new(".")
- .join(get_file_name(&bootstrap_host_compiler.rustc, "raytracer_cg_clif_opt", "bin"))
- .to_str()
- .unwrap(),
+ ("", raytracer_cg_llvm.to_str().unwrap()),
+ ("", raytracer_cg_clif.to_str().unwrap()),
+ ("", raytracer_cg_clif_opt.to_str().unwrap()),
],
+ &bench_run_markdown,
);
bench_run.current_dir(RelPath::BUILD.to_path(dirs));
spawn_and_wait(bench_run);
+
+ if let Some(gha_step_summary) = gha_step_summary.as_mut() {
+ gha_step_summary.write_all(b"## Run ebobby/simple-raytracer\n\n").unwrap();
+ gha_step_summary.write_all(&std::fs::read(bench_run_markdown).unwrap()).unwrap();
+ gha_step_summary.write_all(b"\n").unwrap();
+ }
}
diff --git a/compiler/rustc_codegen_cranelift/build_system/build_backend.rs b/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
index 6855c1a7f..e434c36f9 100644
--- a/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
@@ -1,9 +1,9 @@
-use std::env;
use std::path::PathBuf;
-use super::path::{Dirs, RelPath};
-use super::rustc_info::get_file_name;
-use super::utils::{is_ci, is_ci_opt, maybe_incremental, CargoProject, Compiler};
+use crate::path::{Dirs, RelPath};
+use crate::rustc_info::get_file_name;
+use crate::shared_utils::{rustflags_from_env, rustflags_to_cmd_env};
+use crate::utils::{is_ci, is_ci_opt, maybe_incremental, CargoProject, Compiler, LogGroup};
pub(crate) static CG_CLIF: CargoProject = CargoProject::new(&RelPath::SOURCE, "cg_clif");
@@ -13,14 +13,16 @@ pub(crate) fn build_backend(
bootstrap_host_compiler: &Compiler,
use_unstable_features: bool,
) -> PathBuf {
+ let _group = LogGroup::guard("Build backend");
+
let mut cmd = CG_CLIF.build(&bootstrap_host_compiler, dirs);
maybe_incremental(&mut cmd);
- let mut rustflags = env::var("RUSTFLAGS").unwrap_or_default();
+ let mut rustflags = rustflags_from_env("RUSTFLAGS");
if is_ci() {
// Deny warnings on CI
- rustflags += " -Dwarnings";
+ rustflags.push("-Dwarnings".to_owned());
if !is_ci_opt() {
cmd.env("CARGO_PROFILE_RELEASE_DEBUG_ASSERTIONS", "true");
@@ -40,10 +42,10 @@ pub(crate) fn build_backend(
_ => unreachable!(),
}
- cmd.env("RUSTFLAGS", rustflags);
+ rustflags_to_cmd_env(&mut cmd, "RUSTFLAGS", &rustflags);
eprintln!("[BUILD] rustc_codegen_cranelift");
- super::utils::spawn_and_wait(cmd);
+ crate::utils::spawn_and_wait(cmd);
CG_CLIF
.target_dir(dirs)
diff --git a/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
index 74bba9ed5..31a4b2098 100644
--- a/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
@@ -2,12 +2,13 @@ use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
-use super::path::{Dirs, RelPath};
-use super::rustc_info::get_file_name;
-use super::utils::{
+use crate::path::{Dirs, RelPath};
+use crate::rustc_info::get_file_name;
+use crate::utils::{
maybe_incremental, remove_dir_if_exists, spawn_and_wait, try_hard_link, CargoProject, Compiler,
+ LogGroup,
};
-use super::{CodegenBackend, SysrootKind};
+use crate::{config, CodegenBackend, SysrootKind};
static DIST_DIR: RelPath = RelPath::DIST;
static BIN_DIR: RelPath = RelPath::DIST.join("bin");
@@ -22,6 +23,8 @@ pub(crate) fn build_sysroot(
rustup_toolchain_name: Option<&str>,
target_triple: String,
) -> Compiler {
+ let _guard = LogGroup::guard("Build sysroot");
+
eprintln!("[BUILD] sysroot {:?}", sysroot_kind);
DIST_DIR.ensure_fresh(dirs);
@@ -125,8 +128,8 @@ pub(crate) fn build_sysroot(
cargo: bootstrap_host_compiler.cargo.clone(),
rustc: rustc_clif.clone(),
rustdoc: rustdoc_clif.clone(),
- rustflags: String::new(),
- rustdocflags: String::new(),
+ rustflags: vec![],
+ rustdocflags: vec![],
triple: target_triple,
runner: vec![],
}
@@ -182,7 +185,7 @@ fn build_sysroot_for_triple(
#[must_use]
fn build_llvm_sysroot_for_triple(compiler: Compiler) -> SysrootTarget {
- let default_sysroot = super::rustc_info::get_default_sysroot(&compiler.rustc);
+ let default_sysroot = crate::rustc_info::get_default_sysroot(&compiler.rustc);
let mut target_libs = SysrootTarget { triple: compiler.triple, libs: vec![] };
@@ -231,29 +234,32 @@ fn build_clif_sysroot_for_triple(
let build_dir = STANDARD_LIBRARY.target_dir(dirs).join(&compiler.triple).join(channel);
- if !super::config::get_bool("keep_sysroot") {
+ if !config::get_bool("keep_sysroot") {
// Cleanup the deps dir, but keep build scripts and the incremental cache for faster
// recompilation as they are not affected by changes in cg_clif.
remove_dir_if_exists(&build_dir.join("deps"));
}
// Build sysroot
- let mut rustflags = " -Zforce-unstable-if-unmarked -Cpanic=abort".to_string();
+ let mut rustflags = vec!["-Zforce-unstable-if-unmarked".to_owned(), "-Cpanic=abort".to_owned()];
match cg_clif_dylib_path {
CodegenBackend::Local(path) => {
- rustflags.push_str(&format!(" -Zcodegen-backend={}", path.to_str().unwrap()));
+ rustflags.push(format!("-Zcodegen-backend={}", path.to_str().unwrap()));
}
CodegenBackend::Builtin(name) => {
- rustflags.push_str(&format!(" -Zcodegen-backend={name}"));
+ rustflags.push(format!("-Zcodegen-backend={name}"));
}
};
// Necessary for MinGW to find rsbegin.o and rsend.o
- rustflags
- .push_str(&format!(" --sysroot {}", RTSTARTUP_SYSROOT.to_path(dirs).to_str().unwrap()));
+ rustflags.push("--sysroot".to_owned());
+ rustflags.push(RTSTARTUP_SYSROOT.to_path(dirs).to_str().unwrap().to_owned());
if channel == "release" {
- rustflags.push_str(" -Zmir-opt-level=3");
+ // Incremental compilation by default disables mir inlining. This leads to both a decent
+ // compile perf and a significant runtime perf regression. As such forcefully enable mir
+ // inlining.
+ rustflags.push("-Zinline-mir".to_owned());
}
- compiler.rustflags += &rustflags;
+ compiler.rustflags.extend(rustflags);
let mut build_cmd = STANDARD_LIBRARY.build(&compiler, dirs);
maybe_incremental(&mut build_cmd);
if channel == "release" {
@@ -283,8 +289,8 @@ fn build_clif_sysroot_for_triple(
}
fn build_rtstartup(dirs: &Dirs, compiler: &Compiler) -> Option<SysrootTarget> {
- if !super::config::get_bool("keep_sysroot") {
- super::prepare::prepare_stdlib(dirs, &compiler.rustc);
+ if !config::get_bool("keep_sysroot") {
+ crate::prepare::prepare_stdlib(dirs, &compiler.rustc);
}
if !compiler.triple.ends_with("windows-gnu") {
@@ -300,6 +306,7 @@ fn build_rtstartup(dirs: &Dirs, compiler: &Compiler) -> Option<SysrootTarget> {
let obj = RTSTARTUP_SYSROOT.to_path(dirs).join(format!("{file}.o"));
let mut build_rtstartup_cmd = Command::new(&compiler.rustc);
build_rtstartup_cmd
+ .arg("-Ainternal_features") // Missing #[allow(internal_features)]
.arg("--target")
.arg(&compiler.triple)
.arg("--emit=obj")
diff --git a/compiler/rustc_codegen_cranelift/build_system/main.rs b/compiler/rustc_codegen_cranelift/build_system/main.rs
index 3bc78d5db..798ae9dbd 100644
--- a/compiler/rustc_codegen_cranelift/build_system/main.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/main.rs
@@ -16,6 +16,7 @@ mod config;
mod path;
mod prepare;
mod rustc_info;
+mod shared_utils;
mod tests;
mod utils;
@@ -169,8 +170,8 @@ fn main() {
cargo,
rustc,
rustdoc,
- rustflags: String::new(),
- rustdocflags: String::new(),
+ rustflags: vec![],
+ rustdocflags: vec![],
triple,
runner: vec![],
}
diff --git a/compiler/rustc_codegen_cranelift/build_system/path.rs b/compiler/rustc_codegen_cranelift/build_system/path.rs
index 4f86c0fd2..8572815fc 100644
--- a/compiler/rustc_codegen_cranelift/build_system/path.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/path.rs
@@ -1,7 +1,7 @@
use std::fs;
use std::path::PathBuf;
-use super::utils::remove_dir_if_exists;
+use crate::utils::remove_dir_if_exists;
#[derive(Debug, Clone)]
pub(crate) struct Dirs {
diff --git a/compiler/rustc_codegen_cranelift/build_system/prepare.rs b/compiler/rustc_codegen_cranelift/build_system/prepare.rs
index e31e39a48..165296cb4 100644
--- a/compiler/rustc_codegen_cranelift/build_system/prepare.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/prepare.rs
@@ -3,18 +3,18 @@ use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
-use super::build_sysroot::STDLIB_SRC;
-use super::path::{Dirs, RelPath};
-use super::rustc_info::get_default_sysroot;
-use super::utils::{
+use crate::build_sysroot::STDLIB_SRC;
+use crate::path::{Dirs, RelPath};
+use crate::rustc_info::get_default_sysroot;
+use crate::utils::{
copy_dir_recursively, git_command, remove_dir_if_exists, retry_spawn_and_wait, spawn_and_wait,
};
pub(crate) fn prepare(dirs: &Dirs) {
RelPath::DOWNLOAD.ensure_exists(dirs);
- super::tests::RAND_REPO.fetch(dirs);
- super::tests::REGEX_REPO.fetch(dirs);
- super::tests::PORTABLE_SIMD_REPO.fetch(dirs);
+ crate::tests::RAND_REPO.fetch(dirs);
+ crate::tests::REGEX_REPO.fetch(dirs);
+ crate::tests::PORTABLE_SIMD_REPO.fetch(dirs);
}
pub(crate) fn prepare_stdlib(dirs: &Dirs, rustc: &Path) {
@@ -27,6 +27,7 @@ pub(crate) fn prepare_stdlib(dirs: &Dirs, rustc: &Path) {
STDLIB_SRC.to_path(dirs).join("Cargo.toml"),
r#"
[workspace]
+resolver = "1"
members = ["./library/sysroot"]
[patch.crates-io]
diff --git a/compiler/rustc_codegen_cranelift/build_system/shared_utils.rs b/compiler/rustc_codegen_cranelift/build_system/shared_utils.rs
new file mode 100644
index 000000000..0aea545ff
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/shared_utils.rs
@@ -0,0 +1,26 @@
+// This file is used by both the build system and cargo-clif.rs
+
+// Adapted from https://github.com/rust-lang/cargo/blob/6dc1deaddf62c7748c9097c7ea88e9ec77ff1a1a/src/cargo/core/compiler/build_context/target_info.rs#L750-L77
+pub(crate) fn rustflags_from_env(kind: &str) -> Vec<String> {
+ // First try CARGO_ENCODED_RUSTFLAGS from the environment.
+ // Prefer this over RUSTFLAGS since it's less prone to encoding errors.
+ if let Ok(a) = std::env::var(format!("CARGO_ENCODED_{}", kind)) {
+ if a.is_empty() {
+ return Vec::new();
+ }
+ return a.split('\x1f').map(str::to_string).collect();
+ }
+
+ // Then try RUSTFLAGS from the environment
+ if let Ok(a) = std::env::var(kind) {
+ let args = a.split(' ').map(str::trim).filter(|s| !s.is_empty()).map(str::to_string);
+ return args.collect();
+ }
+
+ // No rustflags to be collected from the environment
+ Vec::new()
+}
+
+pub(crate) fn rustflags_to_cmd_env(cmd: &mut std::process::Command, kind: &str, flags: &[String]) {
+ cmd.env(format!("CARGO_ENCODED_{}", kind), flags.join("\x1f"));
+}
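The two helpers above define the flag-passing protocol used throughout the build system: rustflags_to_cmd_env joins the flags with the ASCII unit separator (0x1f) into CARGO_ENCODED_RUSTFLAGS, and rustflags_from_env prefers that variable, falling back to whitespace-splitting a plain RUSTFLAGS, so flags whose values contain spaces round-trip unchanged. A small self-contained sketch of that round trip, with made-up flag values:

fn main() {
    let flags =
        vec!["-Cpanic=abort".to_owned(), "--sysroot".to_owned(), "/tmp/my sysroot".to_owned()];

    // Encode: join with 0x1f, exactly as rustflags_to_cmd_env does for the spawned command.
    std::env::set_var("CARGO_ENCODED_RUSTFLAGS", flags.join("\x1f"));

    // Decode: split on 0x1f again, exactly as rustflags_from_env does; the space in the
    // hypothetical sysroot path is preserved, which a plain RUSTFLAGS split would destroy.
    let decoded: Vec<String> = std::env::var("CARGO_ENCODED_RUSTFLAGS")
        .unwrap()
        .split('\x1f')
        .map(str::to_string)
        .collect();
    assert_eq!(decoded, flags);
}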
diff --git a/compiler/rustc_codegen_cranelift/build_system/tests.rs b/compiler/rustc_codegen_cranelift/build_system/tests.rs
index 08d8f708c..e7bd8b127 100644
--- a/compiler/rustc_codegen_cranelift/build_system/tests.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/tests.rs
@@ -1,16 +1,17 @@
-use super::build_sysroot;
-use super::config;
-use super::path::{Dirs, RelPath};
-use super::prepare::{apply_patches, GitRepo};
-use super::rustc_info::get_default_sysroot;
-use super::utils::{spawn_and_wait, spawn_and_wait_with_input, CargoProject, Compiler};
-use super::{CodegenBackend, SysrootKind};
-use std::env;
use std::ffi::OsStr;
use std::fs;
use std::path::PathBuf;
use std::process::Command;
+use crate::build_sysroot;
+use crate::config;
+use crate::path::{Dirs, RelPath};
+use crate::prepare::{apply_patches, GitRepo};
+use crate::rustc_info::get_default_sysroot;
+use crate::shared_utils::rustflags_from_env;
+use crate::utils::{spawn_and_wait, spawn_and_wait_with_input, CargoProject, Compiler, LogGroup};
+use crate::{CodegenBackend, SysrootKind};
+
static BUILD_EXAMPLE_OUT_DIR: RelPath = RelPath::BUILD.join("example");
struct TestCase {
@@ -21,6 +22,7 @@ struct TestCase {
enum TestCaseCmd {
Custom { func: &'static dyn Fn(&TestRunner<'_>) },
BuildLib { source: &'static str, crate_types: &'static str },
+ BuildBin { source: &'static str },
BuildBinAndRun { source: &'static str, args: &'static [&'static str] },
JitBin { source: &'static str, args: &'static str },
}
@@ -39,6 +41,10 @@ impl TestCase {
Self { config, cmd: TestCaseCmd::BuildLib { source, crate_types } }
}
+ const fn build_bin(config: &'static str, source: &'static str) -> Self {
+ Self { config, cmd: TestCaseCmd::BuildBin { source } }
+ }
+
const fn build_bin_and_run(
config: &'static str,
source: &'static str,
@@ -92,6 +98,7 @@ const BASE_SYSROOT_SUITE: &[TestCase] = &[
TestCase::build_bin_and_run("aot.float-minmax-pass", "example/float-minmax-pass.rs", &[]),
TestCase::build_bin_and_run("aot.mod_bench", "example/mod_bench.rs", &[]),
TestCase::build_bin_and_run("aot.issue-72793", "example/issue-72793.rs", &[]),
+ TestCase::build_bin("aot.issue-59326", "example/issue-59326.rs"),
];
// FIXME(rust-random/rand#1293): Newer rand versions fail to test on Windows. Update once this is
@@ -119,8 +126,8 @@ pub(crate) static REGEX: CargoProject = CargoProject::new(&REGEX_REPO.source_dir
pub(crate) static PORTABLE_SIMD_REPO: GitRepo = GitRepo::github(
"rust-lang",
"portable-simd",
- "ad8afa8c81273b3b49acbea38cd3bcf17a34cf2b",
- "800548f8000e31bd",
+ "7c7dbe0c505ccbc02ff30c1e37381ab1d47bf46f",
+ "5bcc9c544f6fa7bd",
"portable-simd",
);
@@ -300,7 +307,7 @@ pub(crate) fn run_tests(
);
// Rust's build system denies a couple of lints that trigger on several of the test
// projects. Changing the code to fix them is not worth it, so just silence all lints.
- target_compiler.rustflags += " --cap-lints=allow";
+ target_compiler.rustflags.push("--cap-lints=allow".to_owned());
let runner = TestRunner::new(
dirs.clone(),
@@ -344,18 +351,15 @@ impl<'a> TestRunner<'a> {
is_native: bool,
stdlib_source: PathBuf,
) -> Self {
- if let Ok(rustflags) = env::var("RUSTFLAGS") {
- target_compiler.rustflags.push(' ');
- target_compiler.rustflags.push_str(&rustflags);
- }
- if let Ok(rustdocflags) = env::var("RUSTDOCFLAGS") {
- target_compiler.rustdocflags.push(' ');
- target_compiler.rustdocflags.push_str(&rustdocflags);
- }
+ target_compiler.rustflags.extend(rustflags_from_env("RUSTFLAGS"));
+ target_compiler.rustdocflags.extend(rustflags_from_env("RUSTDOCFLAGS"));
// FIXME fix `#[linkage = "extern_weak"]` without this
if target_compiler.triple.contains("darwin") {
- target_compiler.rustflags.push_str(" -Clink-arg=-undefined -Clink-arg=dynamic_lookup");
+ target_compiler.rustflags.extend([
+ "-Clink-arg=-undefined".to_owned(),
+ "-Clink-arg=dynamic_lookup".to_owned(),
+ ]);
}
let jit_supported = use_unstable_features
@@ -380,15 +384,17 @@ impl<'a> TestRunner<'a> {
let tag = tag.to_uppercase();
let is_jit_test = tag == "JIT";
- if !config::get_bool(config)
+ let _guard = if !config::get_bool(config)
|| (is_jit_test && !self.jit_supported)
|| self.skip_tests.contains(&config)
{
eprintln!("[{tag}] {testname} (skipped)");
continue;
} else {
+ let guard = LogGroup::guard(&format!("[{tag}] {testname}"));
eprintln!("[{tag}] {testname}");
- }
+ guard
+ };
match *cmd {
TestCaseCmd::Custom { func } => func(self),
@@ -405,6 +411,13 @@ impl<'a> TestRunner<'a> {
]);
}
}
+ TestCaseCmd::BuildBin { source } => {
+ if self.use_unstable_features {
+ self.run_rustc([source]);
+ } else {
+ self.run_rustc([source, "--cfg", "no_unstable_features"]);
+ }
+ }
TestCaseCmd::BuildBinAndRun { source, args } => {
if self.use_unstable_features {
self.run_rustc([source]);
@@ -455,7 +468,7 @@ impl<'a> TestRunner<'a> {
S: AsRef<OsStr>,
{
let mut cmd = Command::new(&self.target_compiler.rustc);
- cmd.args(self.target_compiler.rustflags.split_whitespace());
+ cmd.args(&self.target_compiler.rustflags);
cmd.arg("-L");
cmd.arg(format!("crate={}", BUILD_EXAMPLE_OUT_DIR.to_path(&self.dirs).display()));
cmd.arg("--out-dir");
diff --git a/compiler/rustc_codegen_cranelift/build_system/usage.txt b/compiler/rustc_codegen_cranelift/build_system/usage.txt
index 6d3b3a13d..f65259944 100644
--- a/compiler/rustc_codegen_cranelift/build_system/usage.txt
+++ b/compiler/rustc_codegen_cranelift/build_system/usage.txt
@@ -43,7 +43,7 @@ REQUIREMENTS:
* Rustup: By default rustup is used to install the right nightly version. If you don't want to
use rustup, you can manually install the nightly version indicated by rust-toolchain.toml and
point the CARGO, RUSTC and RUSTDOC env vars to the right executables.
- * Git: `./y.sh prepare` uses git for applying patches and on Windows for downloading test repos.
+ * Git: Git is used for applying patches and on Windows for downloading test repos.
* Curl and tar (non-Windows only): Used by `./y.sh prepare` to download a single commit for
repos. Git will be used to clone the whole repo when using Windows.
* [Hyperfine](https://github.com/sharkdp/hyperfine/): Used for benchmarking with `./y.sh bench`.
diff --git a/compiler/rustc_codegen_cranelift/build_system/utils.rs b/compiler/rustc_codegen_cranelift/build_system/utils.rs
index 41fc366e2..24624cdea 100644
--- a/compiler/rustc_codegen_cranelift/build_system/utils.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/utils.rs
@@ -3,16 +3,18 @@ use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
+use std::sync::atomic::{AtomicBool, Ordering};
-use super::path::{Dirs, RelPath};
+use crate::path::{Dirs, RelPath};
+use crate::shared_utils::rustflags_to_cmd_env;
#[derive(Clone, Debug)]
pub(crate) struct Compiler {
pub(crate) cargo: PathBuf,
pub(crate) rustc: PathBuf,
pub(crate) rustdoc: PathBuf,
- pub(crate) rustflags: String,
- pub(crate) rustdocflags: String,
+ pub(crate) rustflags: Vec<String>,
+ pub(crate) rustdocflags: Vec<String>,
pub(crate) triple: String,
pub(crate) runner: Vec<String>,
}
@@ -22,8 +24,8 @@ impl Compiler {
match self.triple.as_str() {
"aarch64-unknown-linux-gnu" => {
// We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
- self.rustflags += " -Clinker=aarch64-linux-gnu-gcc";
- self.rustdocflags += " -Clinker=aarch64-linux-gnu-gcc";
+ self.rustflags.push("-Clinker=aarch64-linux-gnu-gcc".to_owned());
+ self.rustdocflags.push("-Clinker=aarch64-linux-gnu-gcc".to_owned());
self.runner = vec![
"qemu-aarch64".to_owned(),
"-L".to_owned(),
@@ -32,8 +34,8 @@ impl Compiler {
}
"s390x-unknown-linux-gnu" => {
// We are cross-compiling for s390x. Use the correct linker and run tests in qemu.
- self.rustflags += " -Clinker=s390x-linux-gnu-gcc";
- self.rustdocflags += " -Clinker=s390x-linux-gnu-gcc";
+ self.rustflags.push("-Clinker=s390x-linux-gnu-gcc".to_owned());
+ self.rustdocflags.push("-Clinker=s390x-linux-gnu-gcc".to_owned());
self.runner = vec![
"qemu-s390x".to_owned(),
"-L".to_owned(),
@@ -99,8 +101,8 @@ impl CargoProject {
cmd.env("RUSTC", &compiler.rustc);
cmd.env("RUSTDOC", &compiler.rustdoc);
- cmd.env("RUSTFLAGS", &compiler.rustflags);
- cmd.env("RUSTDOCFLAGS", &compiler.rustdocflags);
+ rustflags_to_cmd_env(&mut cmd, "RUSTFLAGS", &compiler.rustflags);
+ rustflags_to_cmd_env(&mut cmd, "RUSTDOCFLAGS", &compiler.rustdocflags);
if !compiler.runner.is_empty() {
cmd.env(
format!("CARGO_TARGET_{}_RUNNER", compiler.triple.to_uppercase().replace('-', "_")),
@@ -136,10 +138,13 @@ pub(crate) fn hyperfine_command(
warmup: u64,
runs: u64,
prepare: Option<&str>,
- cmds: &[&str],
+ cmds: &[(&str, &str)],
+ markdown_export: &Path,
) -> Command {
let mut bench = Command::new("hyperfine");
+ bench.arg("--export-markdown").arg(markdown_export);
+
if warmup != 0 {
bench.arg("--warmup").arg(warmup.to_string());
}
@@ -152,7 +157,12 @@ pub(crate) fn hyperfine_command(
bench.arg("--prepare").arg(prepare);
}
- bench.args(cmds);
+ for &(name, cmd) in cmds {
+ if name != "" {
+ bench.arg("-n").arg(name);
+ }
+ bench.arg(cmd);
+ }
bench
}
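With this change each benchmark passed to hyperfine is a (name, command) pair and the results are additionally written to a markdown file. A standalone sketch of the command line the helper ends up assembling; the benchmark names, commands and output file are hypothetical:

use std::process::Command;

fn main() {
    let mut bench = Command::new("hyperfine");
    bench.arg("--export-markdown").arg("bench.md"); // persist the results as a markdown table
    bench.arg("--warmup").arg("1");
    bench.arg("--runs").arg("3");
    // Each benchmark is a (name, command) pair; an empty name just uses the command as label.
    for &(name, cmd) in &[("baseline", "cargo build"), ("", "cargo clif build")] {
        if !name.is_empty() {
            bench.arg("-n").arg(name);
        }
        bench.arg(cmd);
    }
    // Roughly: hyperfine --export-markdown bench.md --warmup 1 --runs 3 -n baseline "cargo build" "cargo clif build"
    println!("{bench:?}");
}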
@@ -167,6 +177,8 @@ pub(crate) fn git_command<'a>(repo_dir: impl Into<Option<&'a Path>>, cmd: &str)
.arg("user.email=dummy@example.com")
.arg("-c")
.arg("core.autocrlf=false")
+ .arg("-c")
+ .arg("commit.gpgSign=false")
.arg(cmd);
if let Some(repo_dir) = repo_dir.into() {
git_cmd.current_dir(repo_dir);
@@ -259,6 +271,33 @@ pub(crate) fn is_ci_opt() -> bool {
env::var("CI_OPT").is_ok()
}
+static IN_GROUP: AtomicBool = AtomicBool::new(false);
+pub(crate) struct LogGroup {
+ is_gha: bool,
+}
+
+impl LogGroup {
+ pub(crate) fn guard(name: &str) -> LogGroup {
+ let is_gha = env::var("GITHUB_ACTIONS").is_ok();
+
+ assert!(!IN_GROUP.swap(true, Ordering::SeqCst));
+ if is_gha {
+ eprintln!("::group::{name}");
+ }
+
+ LogGroup { is_gha }
+ }
+}
+
+impl Drop for LogGroup {
+ fn drop(&mut self) {
+ if self.is_gha {
+ eprintln!("::endgroup::");
+ }
+ IN_GROUP.store(false, Ordering::SeqCst);
+ }
+}
+
pub(crate) fn maybe_incremental(cmd: &mut Command) {
if is_ci() || std::env::var("CARGO_BUILD_INCREMENTAL").map_or(false, |val| val == "false") {
// Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
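LogGroup above is an RAII guard around GitHub Actions log grouping: on construction it prints the ::group:: workflow command (only when GITHUB_ACTIONS is set), on drop it prints ::endgroup::, and the IN_GROUP flag asserts that groups never nest. A simplified self-contained sketch of the same pattern, with a hypothetical group name:

fn main() {
    struct Group {
        is_gha: bool,
    }
    impl Group {
        fn guard(name: &str) -> Group {
            let is_gha = std::env::var("GITHUB_ACTIONS").is_ok();
            if is_gha {
                eprintln!("::group::{name}"); // CI folds everything until ::endgroup::
            }
            Group { is_gha }
        }
    }
    impl Drop for Group {
        fn drop(&mut self) {
            if self.is_gha {
                eprintln!("::endgroup::");
            }
        }
    }

    let _guard = Group::guard("[AOT] example/mod_bench.rs");
    eprintln!("test output goes here"); // appears collapsed under the group header on CI
} // _guard dropped here, closing the group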
diff --git a/compiler/rustc_codegen_cranelift/config.txt b/compiler/rustc_codegen_cranelift/config.txt
index d6e3924a2..fa1c9f425 100644
--- a/compiler/rustc_codegen_cranelift/config.txt
+++ b/compiler/rustc_codegen_cranelift/config.txt
@@ -41,6 +41,7 @@ aot.track-caller-attribute
aot.float-minmax-pass
aot.mod_bench
aot.issue-72793
+aot.issue-59326
testsuite.extended_sysroot
test.rust-random/rand
diff --git a/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs b/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs
index b8f901d1b..80a2776ca 100644
--- a/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs
+++ b/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs
@@ -22,7 +22,7 @@ fn main() {
#[cfg(not(any(target_arch = "mips", target_arch = "mips64")))]
let nan = f32::NAN;
- // MIPS hardware treats f32::NAN as SNAN. Clear the signaling bit.
+ // MIPS hardware, except MIPS R6, treats f32::NAN as SNAN. Clear the signaling bit.
// See https://github.com/rust-lang/rust/issues/52746.
#[cfg(any(target_arch = "mips", target_arch = "mips64"))]
let nan = f32::from_bits(f32::NAN.to_bits() - 1);
diff --git a/compiler/rustc_codegen_cranelift/example/issue-59326.rs b/compiler/rustc_codegen_cranelift/example/issue-59326.rs
new file mode 100644
index 000000000..70b7c94e1
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/issue-59326.rs
@@ -0,0 +1,27 @@
+// Based on https://github.com/rust-lang/rust/blob/689511047a75a30825e367d4fd45c74604d0b15e/tests/ui/issues/issue-59326.rs#L1
+// check-pass
+trait Service {
+ type S;
+}
+
+trait Framing {
+ type F;
+}
+
+impl Framing for () {
+ type F = ();
+}
+
+trait HttpService<F: Framing>: Service<S = F::F> {}
+
+type BoxService = Box<dyn HttpService<(), S = ()>>;
+
+fn build_server<F: FnOnce() -> BoxService>(_: F) {}
+
+fn make_server<F: Framing>() -> Box<dyn HttpService<F, S = F::F>> {
+ unimplemented!()
+}
+
+fn main() {
+ build_server(|| make_server())
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core.rs b/compiler/rustc_codegen_cranelift/example/mini_core.rs
index 79ca4c039..34c7e44b2 100644
--- a/compiler/rustc_codegen_cranelift/example/mini_core.rs
+++ b/compiler/rustc_codegen_cranelift/example/mini_core.rs
@@ -11,7 +11,7 @@
thread_local
)]
#![no_core]
-#![allow(dead_code)]
+#![allow(dead_code, internal_features)]
#[lang = "sized"]
pub trait Sized {}
@@ -547,7 +547,9 @@ impl<T> Box<T> {
impl<T: ?Sized, A> Drop for Box<T, A> {
fn drop(&mut self) {
// inner value is dropped by compiler
- libc::free(self.0.pointer.0 as *mut u8);
+ unsafe {
+ libc::free(self.0.pointer.0 as *mut u8);
+ }
}
}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
index d97fab9eb..91de04d97 100644
--- a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
+++ b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
@@ -1,6 +1,6 @@
#![feature(no_core, lang_items, never_type, linkage, extern_types, thread_local, repr_simd)]
#![no_core]
-#![allow(dead_code, non_camel_case_types)]
+#![allow(dead_code, non_camel_case_types, internal_features)]
extern crate mini_core;
diff --git a/compiler/rustc_codegen_cranelift/example/std_example.rs b/compiler/rustc_codegen_cranelift/example/std_example.rs
index 1bf0ff64c..490cc2404 100644
--- a/compiler/rustc_codegen_cranelift/example/std_example.rs
+++ b/compiler/rustc_codegen_cranelift/example/std_example.rs
@@ -1,4 +1,12 @@
-#![feature(core_intrinsics, generators, generator_trait, is_sorted, repr_simd)]
+#![feature(
+ core_intrinsics,
+ generators,
+ generator_trait,
+ is_sorted,
+ repr_simd,
+ tuple_trait,
+ unboxed_closures
+)]
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;
@@ -155,12 +163,34 @@ fn main() {
}
foo(I64X2(0, 0));
+
+ transmute_fat_pointer();
+
+ rust_call_abi();
}
fn panic(_: u128) {
panic!();
}
+use std::mem::transmute;
+
+#[cfg(target_pointer_width = "32")]
+type TwoPtrs = i64;
+#[cfg(target_pointer_width = "64")]
+type TwoPtrs = i128;
+
+fn transmute_fat_pointer() -> TwoPtrs {
+ unsafe { transmute::<_, TwoPtrs>("true !") }
+}
+
+extern "rust-call" fn rust_call_abi_callee<T: std::marker::Tuple>(_: T) {}
+
+fn rust_call_abi() {
+ rust_call_abi_callee(());
+ rust_call_abi_callee((1, 2));
+}
+
#[repr(simd)]
struct I64X2(i64, i64);
diff --git a/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Allow-internal-features.patch b/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Allow-internal-features.patch
new file mode 100644
index 000000000..87252df1e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Allow-internal-features.patch
@@ -0,0 +1,24 @@
+From fcf75306d88e533b83eaff3f8d0ab9f307e8a84d Mon Sep 17 00:00:00 2001
+From: bjorn3 <17426603+bjorn3@users.noreply.github.com>
+Date: Wed, 9 Aug 2023 10:01:17 +0000
+Subject: [PATCH] Allow internal features
+
+---
+ crates/core_simd/src/lib.rs | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/crates/core_simd/src/lib.rs b/crates/core_simd/src/lib.rs
+index fde406b..b386116 100644
+--- a/crates/core_simd/src/lib.rs
++++ b/crates/core_simd/src/lib.rs
+@@ -19,6 +19,7 @@
+ #![warn(missing_docs, clippy::missing_inline_in_public_items)] // basically all items, really
+ #![deny(unsafe_op_in_unsafe_fn, clippy::undocumented_unsafe_blocks)]
+ #![unstable(feature = "portable_simd", issue = "86656")]
++#![allow(internal_features)]
+ //! Portable SIMD module.
+
+ #[path = "mod.rs"]
+--
+2.34.1
+
diff --git a/compiler/rustc_codegen_cranelift/patches/0027-coretests-128bit-atomic-operations.patch b/compiler/rustc_codegen_cranelift/patches/0027-coretests-128bit-atomic-operations.patch
index 1d5479bed..a650e1011 100644
--- a/compiler/rustc_codegen_cranelift/patches/0027-coretests-128bit-atomic-operations.patch
+++ b/compiler/rustc_codegen_cranelift/patches/0027-coretests-128bit-atomic-operations.patch
@@ -10,6 +10,18 @@ Cranelift doesn't support them yet
library/core/tests/atomic.rs | 4 ---
4 files changed, 4 insertions(+), 50 deletions(-)
+diff --git a/lib.rs b/lib.rs
+index 897a5e9..331f66f 100644
+--- a/lib.rs
++++ b/lib.rs
+@@ -93,7 +93,6 @@
+ #![feature(const_option)]
+ #![feature(const_option_ext)]
+ #![feature(const_result)]
+-#![cfg_attr(target_has_atomic = "128", feature(integer_atomics))]
+ #![feature(int_roundings)]
+ #![feature(slice_group_by)]
+ #![feature(split_array)]
diff --git a/atomic.rs b/atomic.rs
index b735957..ea728b6 100644
--- a/atomic.rs
diff --git a/compiler/rustc_codegen_cranelift/patches/0027-stdlib-128bit-atomic-operations.patch b/compiler/rustc_codegen_cranelift/patches/0027-stdlib-128bit-atomic-operations.patch
index 45f73f36b..646928893 100644
--- a/compiler/rustc_codegen_cranelift/patches/0027-stdlib-128bit-atomic-operations.patch
+++ b/compiler/rustc_codegen_cranelift/patches/0027-stdlib-128bit-atomic-operations.patch
@@ -38,9 +38,9 @@ diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index d9de37e..8293fce 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
-@@ -2234,46 +2234,6 @@ atomic_int! {
- "AtomicU64::new(0)",
- u64 AtomicU64 ATOMIC_U64_INIT
+@@ -2996,42 +2996,6 @@ atomic_int! {
+ 8,
+ u64 AtomicU64
}
-#[cfg(target_has_atomic_load_store = "128")]
-atomic_int! {
@@ -53,14 +53,12 @@ index d9de37e..8293fce 100644
- unstable(feature = "integer_atomics", issue = "99069"),
- unstable(feature = "integer_atomics", issue = "99069"),
- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
-- unstable(feature = "integer_atomics", issue = "99069"),
- cfg_attr(not(test), rustc_diagnostic_item = "AtomicI128"),
- "i128",
- "#![feature(integer_atomics)]\n\n",
- atomic_min, atomic_max,
- 16,
-- "AtomicI128::new(0)",
-- i128 AtomicI128 ATOMIC_I128_INIT
+- i128 AtomicI128
-}
-#[cfg(target_has_atomic_load_store = "128")]
-atomic_int! {
@@ -73,16 +71,15 @@ index d9de37e..8293fce 100644
- unstable(feature = "integer_atomics", issue = "99069"),
- unstable(feature = "integer_atomics", issue = "99069"),
- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
-- unstable(feature = "integer_atomics", issue = "99069"),
- cfg_attr(not(test), rustc_diagnostic_item = "AtomicU128"),
- "u128",
- "#![feature(integer_atomics)]\n\n",
- atomic_umin, atomic_umax,
- 16,
-- "AtomicU128::new(0)",
-- u128 AtomicU128 ATOMIC_U128_INIT
+- u128 AtomicU128
-}
+ #[cfg(target_has_atomic_load_store = "ptr")]
macro_rules! atomic_int_ptr_sized {
( $($target_pointer_width:literal $align:literal)* ) => { $(
--
diff --git a/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml b/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml
index 1dde9e54d..fa175edca 100644
--- a/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml
+++ b/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml
@@ -4,9 +4,9 @@ version = 3
[[package]]
name = "addr2line"
-version = "0.19.0"
+version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
+checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
dependencies = [
"compiler_builtins",
"gimli",
@@ -35,6 +35,12 @@ dependencies = [
]
[[package]]
+name = "allocator-api2"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9"
+
+[[package]]
name = "auxv"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -68,9 +74,9 @@ dependencies = [
[[package]]
name = "compiler_builtins"
-version = "0.1.93"
+version = "0.1.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76630810d973ecea3dbf611e1b7aecfb1012751ef1ff8de3998f89014a166781"
+checksum = "d6c0f24437059853f0fa64afc51f338f93647a3de4cf3358ba1bb4171a199775"
dependencies = [
"cc",
"rustc-std-workspace-core",
@@ -145,10 +151,11 @@ dependencies = [
[[package]]
name = "hashbrown"
-version = "0.13.2"
+version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
+checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
dependencies = [
+ "allocator-api2",
"compiler_builtins",
"rustc-std-workspace-alloc",
"rustc-std-workspace-core",
@@ -156,9 +163,9 @@ dependencies = [
[[package]]
name = "hermit-abi"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
+checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
dependencies = [
"compiler_builtins",
"rustc-std-workspace-alloc",
@@ -186,9 +193,9 @@ dependencies = [
[[package]]
name = "miniz_oxide"
-version = "0.6.2"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
+checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
"adler",
"compiler_builtins",
@@ -198,9 +205,9 @@ dependencies = [
[[package]]
name = "object"
-version = "0.30.4"
+version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385"
+checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1"
dependencies = [
"compiler_builtins",
"memchr",
diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain
index fa3a10b9a..5689bdee6 100644
--- a/compiler/rustc_codegen_cranelift/rust-toolchain
+++ b/compiler/rustc_codegen_cranelift/rust-toolchain
@@ -1,3 +1,3 @@
[toolchain]
-channel = "nightly-2023-06-15"
+channel = "nightly-2023-08-08"
components = ["rust-src", "rustc-dev", "llvm-tools"]
diff --git a/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs b/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs
index 99b97be24..1e14f41d4 100644
--- a/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs
+++ b/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs
@@ -3,6 +3,8 @@ use std::env;
use std::os::unix::process::CommandExt;
use std::process::Command;
+include!("../build_system/shared_utils.rs");
+
fn main() {
let current_exe = env::current_exe().unwrap();
let mut sysroot = current_exe.parent().unwrap();
@@ -10,27 +12,19 @@ fn main() {
sysroot = sysroot.parent().unwrap();
}
- let mut rustflags = String::new();
- rustflags.push_str(" -Cpanic=abort -Zpanic-abort-tests -Zcodegen-backend=");
+ let mut rustflags = vec!["-Cpanic=abort".to_owned(), "-Zpanic-abort-tests".to_owned()];
if let Some(name) = option_env!("BUILTIN_BACKEND") {
- rustflags.push_str(name);
+ rustflags.push(format!("-Zcodegen-backend={name}"));
} else {
- rustflags.push_str(
- sysroot
- .join(if cfg!(windows) { "bin" } else { "lib" })
- .join(
- env::consts::DLL_PREFIX.to_string()
- + "rustc_codegen_cranelift"
- + env::consts::DLL_SUFFIX,
- )
- .to_str()
- .unwrap(),
+ let dylib = sysroot.join(if cfg!(windows) { "bin" } else { "lib" }).join(
+ env::consts::DLL_PREFIX.to_string()
+ + "rustc_codegen_cranelift"
+ + env::consts::DLL_SUFFIX,
);
+ rustflags.push(format!("-Zcodegen-backend={}", dylib.to_str().unwrap()));
}
- rustflags.push_str(" --sysroot ");
- rustflags.push_str(sysroot.to_str().unwrap());
- env::set_var("RUSTFLAGS", env::var("RUSTFLAGS").unwrap_or(String::new()) + &rustflags);
- env::set_var("RUSTDOCFLAGS", env::var("RUSTDOCFLAGS").unwrap_or(String::new()) + &rustflags);
+ rustflags.push("--sysroot".to_owned());
+ rustflags.push(sysroot.to_str().unwrap().to_owned());
let cargo = if let Some(cargo) = option_env!("CARGO") {
cargo
@@ -40,14 +34,19 @@ fn main() {
"cargo"
};
- let args: Vec<_> = match env::args().nth(1).as_deref() {
+ let mut args = env::args().skip(1).collect::<Vec<_>>();
+ if args.get(0).map(|arg| &**arg) == Some("clif") {
+ // Avoid infinite recursion when invoking `cargo-clif` as a cargo subcommand using
+ // `cargo clif`.
+ args.remove(0);
+ }
+
+ let args: Vec<_> = match args.get(0).map(|arg| &**arg) {
Some("jit") => {
- env::set_var(
- "RUSTFLAGS",
- env::var("RUSTFLAGS").unwrap_or(String::new()) + " -Cprefer-dynamic",
- );
+ rustflags.push("-Cprefer-dynamic".to_owned());
+ args.remove(0);
IntoIterator::into_iter(["rustc".to_string()])
- .chain(env::args().skip(2))
+ .chain(args)
.chain([
"--".to_string(),
"-Zunstable-options".to_string(),
@@ -56,12 +55,10 @@ fn main() {
.collect()
}
Some("lazy-jit") => {
- env::set_var(
- "RUSTFLAGS",
- env::var("RUSTFLAGS").unwrap_or(String::new()) + " -Cprefer-dynamic",
- );
+ rustflags.push("-Cprefer-dynamic".to_owned());
+ args.remove(0);
IntoIterator::into_iter(["rustc".to_string()])
- .chain(env::args().skip(2))
+ .chain(args)
.chain([
"--".to_string(),
"-Zunstable-options".to_string(),
@@ -69,14 +66,31 @@ fn main() {
])
.collect()
}
- _ => env::args().skip(1).collect(),
+ _ => args,
};
+ let mut cmd = Command::new(cargo);
+ cmd.args(args);
+ rustflags_to_cmd_env(
+ &mut cmd,
+ "RUSTFLAGS",
+ &rustflags_from_env("RUSTFLAGS")
+ .into_iter()
+ .chain(rustflags.iter().map(|flag| flag.clone()))
+ .collect::<Vec<_>>(),
+ );
+ rustflags_to_cmd_env(
+ &mut cmd,
+ "RUSTDOCFLAGS",
+ &rustflags_from_env("RUSTDOCFLAGS")
+ .into_iter()
+ .chain(rustflags.iter().map(|flag| flag.clone()))
+ .collect::<Vec<_>>(),
+ );
+
#[cfg(unix)]
- panic!("Failed to spawn cargo: {}", Command::new(cargo).args(args).exec());
+ panic!("Failed to spawn cargo: {}", cmd.exec());
#[cfg(not(unix))]
- std::process::exit(
- Command::new(cargo).args(args).spawn().unwrap().wait().unwrap().code().unwrap_or(1),
- );
+ std::process::exit(cmd.spawn().unwrap().wait().unwrap().code().unwrap_or(1));
}
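Summing up the new control flow of cargo-clif: a leading "clif" argument is stripped (so `cargo clif ...` works as a subcommand), the "jit"/"lazy-jit" modes are rewritten into a `cargo rustc ... -- -Zunstable-options ...` invocation, and the accumulated flags are handed to cargo via CARGO_ENCODED_RUSTFLAGS. A self-contained sketch of just the argument rewriting (simplified: the real code also appends the JIT mode flag after -Zunstable-options):

fn rewrite(mut args: Vec<String>) -> Vec<String> {
    // Drop a leading "clif" so `cargo-clif jit` and `cargo clif jit` behave the same.
    if args.first().map(String::as_str) == Some("clif") {
        args.remove(0);
    }
    match args.first().map(String::as_str) {
        Some("jit") | Some("lazy-jit") => {
            args.remove(0);
            std::iter::once("rustc".to_string())
                .chain(args)
                .chain(["--".to_string(), "-Zunstable-options".to_string()])
                .collect()
        }
        _ => args,
    }
}

fn main() {
    let rewritten = rewrite(vec!["clif".into(), "jit".into(), "--release".into()]);
    assert_eq!(rewritten, ["rustc", "--release", "--", "-Zunstable-options"]);
}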
diff --git a/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
index 15b16b42b..e6bbac647 100644
--- a/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
@@ -10,7 +10,8 @@ git fetch
git checkout -- .
git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
-git -c user.name=Dummy -c user.email=dummy@example.com am ../patches/*-stdlib-*.patch
+git -c user.name=Dummy -c user.email=dummy@example.com -c commit.gpgSign=false \
+ am ../patches/*-stdlib-*.patch
git apply - <<EOF
diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
@@ -51,7 +52,7 @@ popd
# FIXME remove once inline asm is fully supported
export RUSTFLAGS="$RUSTFLAGS --cfg=rustix_use_libc"
-export CFG_VIRTUAL_RUST_SOURCE_BASE_DIR="$(cd download/sysroot/sysroot_src; pwd)"
+export CFG_VIRTUAL_RUST_SOURCE_BASE_DIR="$(cd build/stdlib; pwd)"
# Allow the testsuite to use llvm tools
host_triple=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
index a7920cc54..c163b8543 100755
--- a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
@@ -32,6 +32,8 @@ rm tests/ui/parser/unclosed-delimiter-in-dep.rs # submodule contains //~ERROR
# missing features
# ================
+rm -r tests/run-make/comment-section # cg_clif doesn't yet write the .comment section
+
# requires stack unwinding
# FIXME add needs-unwind to this test
rm -r tests/run-make/libtest-junit
@@ -47,6 +49,8 @@ rm tests/ui/proc-macro/allowed-signatures.rs
# vendor intrinsics
rm tests/ui/sse2.rs # cpuid not supported, so sse2 not detected
rm tests/ui/simd/array-type.rs # "Index argument for `simd_insert` is not a constant"
+rm tests/ui/simd/intrinsic/generic-bswap-byte.rs # simd_bswap not yet implemented
+rm tests/ui/simd/intrinsic/generic-arithmetic-pass.rs # many missing simd intrinsics
# exotic linkages
rm tests/ui/issues/issue-33992.rs # unsupported linkages
@@ -98,8 +102,11 @@ rm -r tests/run-make/sepcomp-inlining # same
rm -r tests/run-make/sepcomp-separate # same
rm -r tests/run-make/sepcomp-cci-copies # same
rm -r tests/run-make/volatile-intrinsics # same
+rm -r tests/run-make/llvm-ident # same
+rm -r tests/run-make/no-builtins-attribute # same
rm tests/ui/abi/stack-protector.rs # requires stack protector support
rm -r tests/run-make/emit-stack-sizes # requires support for -Z emit-stack-sizes
+rm -r tests/run-make/optimization-remarks-dir # remarks are LLVM specific
# giving different but possibly correct results
# =============================================
@@ -118,6 +125,9 @@ rm tests/ui/suggestions/derive-trait-for-method-call.rs # same
rm tests/ui/typeck/issue-46112.rs # same
rm tests/ui/consts/const_cmp_type_id.rs # same
rm tests/ui/consts/issue-73976-monomorphic.rs # same
+rm tests/ui/rfcs/rfc-3348-c-string-literals/non-ascii.rs # same
+rm tests/ui/consts/const-eval/nonnull_as_ref_ub.rs # same
+rm tests/ui/consts/issue-94675.rs # same
# rustdoc-clif passes extra args, suppressing the help message when no args are passed
rm -r tests/run-make/issue-88756-default-output
@@ -143,6 +153,8 @@ rm -r tests/run-make/used # same
rm -r tests/run-make/no-alloc-shim
rm -r tests/run-make/emit-to-stdout
+rm -r tests/run-make/extern-fn-explicit-align # argument alignment not yet supported
+
# bugs in the test suite
# ======================
rm tests/ui/backtrace.rs # TODO warning
@@ -162,7 +174,7 @@ index ea06b620c4c..b969d0009c6 100644
@@ -9,7 +9,7 @@ RUSTC_ORIGINAL := \$(RUSTC)
BARE_RUSTC := \$(HOST_RPATH_ENV) '\$(RUSTC)'
BARE_RUSTDOC := \$(HOST_RPATH_ENV) '\$(RUSTDOC)'
- RUSTC := \$(BARE_RUSTC) --out-dir \$(TMPDIR) -L \$(TMPDIR) \$(RUSTFLAGS)
+ RUSTC := \$(BARE_RUSTC) --out-dir \$(TMPDIR) -L \$(TMPDIR) \$(RUSTFLAGS) -Ainternal_features
-RUSTDOC := \$(BARE_RUSTDOC) -L \$(TARGET_RPATH_DIR)
+RUSTDOC := \$(BARE_RUSTDOC)
ifdef RUSTC_LINKER
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
index 364503fd3..ade6968de 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/comments.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -80,14 +80,7 @@ pub(super) fn add_local_place_comments<'tcx>(
return;
}
let TyAndLayout { ty, layout } = place.layout();
- let rustc_target::abi::LayoutS {
- size,
- align,
- abi: _,
- variants: _,
- fields: _,
- largest_niche: _,
- } = layout.0.0;
+ let rustc_target::abi::LayoutS { size, align, .. } = layout.0.0;
let (kind, extra) = place.debug_comment();
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index 199fa6861..b7f56a298 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -48,7 +48,9 @@ pub(crate) fn conv_to_call_conv(sess: &Session, c: Conv, default_call_conv: Call
default_call_conv
}
- Conv::X86Intr => sess.fatal("x86-interrupt call conv not yet implemented"),
+ Conv::X86Intr | Conv::RiscvInterrupt { .. } => {
+ sess.fatal(format!("interrupt call conv {c:?} not yet implemented"))
+ }
Conv::ArmAapcs => sess.fatal("aapcs call conv not yet implemented"),
Conv::CCmseNonSecureCall => {
@@ -70,7 +72,7 @@ pub(crate) fn get_function_sig<'tcx>(
default_call_conv: CallConv,
inst: Instance<'tcx>,
) -> Signature {
- assert!(!inst.substs.has_infer());
+ assert!(!inst.args.has_infer());
clif_sig_from_fn_abi(
tcx,
default_call_conv,
@@ -377,16 +379,16 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let ret_place = codegen_place(fx, destination);
// Handle special calls like intrinsics and empty drop glue.
- let instance = if let ty::FnDef(def_id, substs) = *func.layout().ty.kind() {
+ let instance = if let ty::FnDef(def_id, fn_args) = *func.layout().ty.kind() {
let instance =
- ty::Instance::expect_resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ ty::Instance::expect_resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, fn_args)
.polymorphize(fx.tcx);
if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
crate::intrinsics::codegen_llvm_intrinsic_call(
fx,
&fx.tcx.symbol_name(instance).name,
- substs,
+ fn_args,
args,
ret_place,
target,
@@ -445,9 +447,14 @@ pub(crate) fn codegen_terminator_call<'tcx>(
// Unpack arguments tuple for closures
let mut args = if fn_sig.abi() == Abi::RustCall {
- assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
- let self_arg = codegen_call_argument_operand(fx, &args[0]);
- let pack_arg = codegen_call_argument_operand(fx, &args[1]);
+ let (self_arg, pack_arg) = match args {
+ [pack_arg] => (None, codegen_call_argument_operand(fx, pack_arg)),
+ [self_arg, pack_arg] => (
+ Some(codegen_call_argument_operand(fx, self_arg)),
+ codegen_call_argument_operand(fx, pack_arg),
+ ),
+ _ => panic!("rust-call abi requires one or two arguments"),
+ };
let tupled_arguments = match pack_arg.value.layout().ty.kind() {
ty::Tuple(ref tupled_arguments) => tupled_arguments,
@@ -455,7 +462,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
};
let mut args = Vec::with_capacity(1 + tupled_arguments.len());
- args.push(self_arg);
+ args.extend(self_arg);
for i in 0..tupled_arguments.len() {
args.push(CallArgument {
value: pack_arg.value.value_field(fx, FieldIdx::new(i)),
@@ -611,7 +618,7 @@ pub(crate) fn codegen_drop<'tcx>(
// `Instance::resolve_drop_in_place`?
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
- substs: drop_instance.substs,
+ args: drop_instance.args,
};
let fn_abi =
RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
@@ -648,7 +655,7 @@ pub(crate) fn codegen_drop<'tcx>(
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
- substs: drop_instance.substs,
+ args: drop_instance.args,
};
let fn_abi =
RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
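The rust-call handling above now accepts either shape of that ABI: a plain `extern "rust-call"` function receives only the packed argument tuple, while a closure call additionally passes the closure itself as a leading self argument. A hedged nightly-only sketch of the two shapes, mirroring the rust_call_abi test added to std_example.rs above:

#![feature(unboxed_closures, tuple_trait)]

// One argument: just the tuple (the [pack_arg] arm in the match above).
extern "rust-call" fn no_self<T: std::marker::Tuple>(_args: T) {}

fn main() {
    no_self(());
    no_self((1, 2));

    // Two arguments: calling a closure passes the closure value itself as self_arg,
    // followed by the packed tuple (2, 3) (the [self_arg, pack_arg] arm above).
    let add = |a: i32, b: i32| a + b;
    assert_eq!(add(2, 3), 5);
}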
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
index e92280b26..4e4c595de 100644
--- a/compiler/rustc_codegen_cranelift/src/allocator.rs
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -39,8 +39,8 @@ fn codegen_inner(
if kind == AllocatorKind::Default {
for method in ALLOCATOR_METHODS {
let mut arg_tys = Vec::with_capacity(method.inputs.len());
- for ty in method.inputs.iter() {
- match *ty {
+ for input in method.inputs.iter() {
+ match input.ty {
AllocatorTy::Layout => {
arg_tys.push(usize_ty); // size
arg_tys.push(usize_ty); // align
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 334b2780b..522dd7189 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -7,6 +7,8 @@ use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
use cranelift_codegen::ir::UserFuncName;
+use cranelift_codegen::CodegenError;
+use cranelift_module::ModuleError;
use crate::constant::ConstantCx;
use crate::debuginfo::FunctionDebugContext;
@@ -28,7 +30,7 @@ pub(crate) fn codegen_fn<'tcx>(
module: &mut dyn Module,
instance: Instance<'tcx>,
) -> CodegenedFunction {
- debug_assert!(!instance.substs.has_infer());
+ debug_assert!(!instance.args.has_infer());
let symbol_name = tcx.symbol_name(instance).name.to_string();
let _timer = tcx.prof.generic_activity_with_arg("codegen fn", &*symbol_name);
@@ -172,7 +174,21 @@ pub(crate) fn compile_fn(
// Define function
cx.profiler.generic_activity("define function").run(|| {
context.want_disasm = cx.should_write_ir;
- module.define_function(codegened_func.func_id, context).unwrap();
+ match module.define_function(codegened_func.func_id, context) {
+ Ok(()) => {}
+ Err(ModuleError::Compilation(CodegenError::ImplLimitExceeded)) => {
+ let handler = rustc_session::EarlyErrorHandler::new(
+ rustc_session::config::ErrorOutputType::default(),
+ );
+ handler.early_error(format!(
+ "backend implementation limit exceeded while compiling {name}",
+ name = codegened_func.symbol_name
+ ));
+ }
+ Err(err) => {
+ panic!("Error while defining {name}: {err:?}", name = codegened_func.symbol_name);
+ }
+ }
});
if cx.should_write_ir {
@@ -356,7 +372,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
codegen_panic_inner(
fx,
- rustc_hir::LangItem::PanicBoundsCheck,
+ rustc_hir::LangItem::PanicMisalignedPointerDereference,
&[required, found, location],
source_info.span,
);
@@ -578,13 +594,13 @@ fn codegen_stmt<'tcx>(
let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
let to_layout = fx.layout_of(fx.monomorphize(to_ty));
match *from_ty.kind() {
- ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, args) => {
let func_ref = fx.get_function_ref(
Instance::resolve_for_fn_ptr(
fx.tcx,
ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.unwrap()
.polymorphize(fx.tcx),
@@ -668,11 +684,11 @@ fn codegen_stmt<'tcx>(
) => {
let operand = codegen_operand(fx, operand);
match *operand.layout().ty.kind() {
- ty::Closure(def_id, substs) => {
+ ty::Closure(def_id, args) => {
let instance = Instance::resolve_closure(
fx.tcx,
def_id,
- substs,
+ args,
ty::ClosureKind::FnOnce,
)
.expect("failed to normalize and resolve closure during codegen")
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
index 67ea20112..3081dcfa2 100644
--- a/compiler/rustc_codegen_cranelift/src/common.rs
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -477,7 +477,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
#[inline]
fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
- if let layout::LayoutError::SizeOverflow(_) = err {
+ if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
self.0.sess.span_fatal(span, err.to_string())
} else {
span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index 427340c33..c31535742 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -57,7 +57,7 @@ pub(crate) fn codegen_tls_ref<'tcx>(
let tls_ptr = if !def_id.is_local() && fx.tcx.needs_thread_local_shim(def_id) {
let instance = ty::Instance {
def: ty::InstanceDef::ThreadLocalShim(def_id),
- substs: ty::InternalSubsts::empty(),
+ args: ty::GenericArgs::empty(),
};
let func_ref = fx.get_function_ref(instance);
let call = fx.bcx.ins().call(func_ref, &[]);
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
index 1b454b666..50bc7a127 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -165,7 +165,7 @@ impl FunctionDebugContext {
for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
debug_context.dwarf.unit.line_program.row().address_offset = u64::from(start);
if !loc.is_default() {
- let source_loc = *self.source_loc_set.get_index(loc.bits() as usize).unwrap();
+ let source_loc = self.source_loc_set[loc.bits() as usize];
create_row_for_span(debug_context, source_loc);
} else {
create_row_for_span(debug_context, self.function_source_loc);
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
index 3a7421d8b..8a4b1cccf 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -38,7 +38,7 @@ pub(crate) struct DebugContext {
pub(crate) struct FunctionDebugContext {
entry_id: UnitEntryId,
function_source_loc: (FileId, u64, u64),
- source_loc_set: indexmap::IndexSet<(FileId, u64, u64)>,
+ source_loc_set: IndexSet<(FileId, u64, u64)>,
}
impl DebugContext {
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
index 41e24acef..1c606494f 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/jit.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -98,7 +98,7 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
}
- if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
+ if !tcx.crate_types().contains(&rustc_session::config::CrateType::Executable) {
tcx.sess.fatal("can't jit non-executable crate");
}
@@ -114,9 +114,9 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
.iter()
.map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
.flatten()
- .collect::<FxHashMap<_, (_, _)>>()
+ .collect::<FxHashMap<_, _>>()
.into_iter()
- .collect::<Vec<(_, (_, _))>>();
+ .collect::<Vec<(_, _)>>();
tcx.sess.time("codegen mono items", || {
super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
diff --git a/compiler/rustc_codegen_cranelift/src/driver/mod.rs b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
index 5c52c9c18..12e90b584 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
@@ -5,7 +5,7 @@
//! [`codegen_static`]: crate::constant::codegen_static
use rustc_data_structures::profiling::SelfProfilerRef;
-use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+use rustc_middle::mir::mono::{MonoItem, MonoItemData};
use crate::prelude::*;
@@ -16,11 +16,11 @@ pub(crate) mod jit;
fn predefine_mono_items<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut dyn Module,
- mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
+ mono_items: &[(MonoItem<'tcx>, MonoItemData)],
) {
tcx.prof.generic_activity("predefine functions").run(|| {
let is_compiler_builtins = tcx.is_compiler_builtins(LOCAL_CRATE);
- for &(mono_item, (linkage, visibility)) in mono_items {
+ for &(mono_item, data) in mono_items {
match mono_item {
MonoItem::Fn(instance) => {
let name = tcx.symbol_name(instance).name;
@@ -29,8 +29,8 @@ fn predefine_mono_items<'tcx>(
get_function_sig(tcx, module.target_config().default_call_conv, instance);
let linkage = crate::linkage::get_clif_linkage(
mono_item,
- linkage,
- visibility,
+ data.linkage,
+ data.visibility,
is_compiler_builtins,
);
module.declare_function(name, linkage, &sig).unwrap();
diff --git a/compiler/rustc_codegen_cranelift/src/global_asm.rs b/compiler/rustc_codegen_cranelift/src/global_asm.rs
index 63a1f6959..baadd7a9e 100644
--- a/compiler/rustc_codegen_cranelift/src/global_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/global_asm.rs
@@ -42,7 +42,7 @@ pub(crate) fn codegen_global_asm_item(tcx: TyCtxt<'_>, global_asm: &mut String,
InlineAsmOperand::SymFn { anon_const } => {
let ty = tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
let instance = match ty.kind() {
- &ty::FnDef(def_id, substs) => Instance::new(def_id, substs),
+ &ty::FnDef(def_id, args) => Instance::new(def_id, args),
_ => span_bug!(op_sp, "asm sym is not a function"),
};
let symbol = tcx.symbol_name(instance);
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
index 3ba530c04..518e3da07 100644
--- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -254,12 +254,12 @@ pub(crate) fn codegen_inline_asm<'tcx>(
}
InlineAsmOperand::SymFn { ref value } => {
let literal = fx.monomorphize(value.literal);
- if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ if let ty::FnDef(def_id, args) = *literal.ty().kind() {
let instance = ty::Instance::resolve_for_fn_ptr(
fx.tcx,
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.unwrap();
let symbol = fx.tcx.symbol_name(instance);
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
index f67fdb592..63b5402f2 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -3,23 +3,35 @@
use crate::intrinsics::*;
use crate::prelude::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- substs: SubstsRef<'tcx>,
+ generic_args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: Option<BasicBlock>,
) {
if intrinsic.starts_with("llvm.aarch64") {
return llvm_aarch64::codegen_aarch64_llvm_intrinsic_call(
- fx, intrinsic, substs, args, ret, target,
+ fx,
+ intrinsic,
+ generic_args,
+ args,
+ ret,
+ target,
);
}
if intrinsic.starts_with("llvm.x86") {
- return llvm_x86::codegen_x86_llvm_intrinsic_call(fx, intrinsic, substs, args, ret, target);
+ return llvm_x86::codegen_x86_llvm_intrinsic_call(
+ fx,
+ intrinsic,
+ generic_args,
+ args,
+ ret,
+ target,
+ );
}
match intrinsic {
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
index 33b2f4702..c20a99159 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
@@ -3,12 +3,12 @@
use crate::intrinsics::*;
use crate::prelude::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- _substs: SubstsRef<'tcx>,
+ _args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: Option<BasicBlock>,
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index 24ad0083a..fdd27a454 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -3,12 +3,12 @@
use crate::intrinsics::*;
use crate::prelude::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- _substs: SubstsRef<'tcx>,
+ _args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: Option<BasicBlock>,
@@ -18,6 +18,20 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
// Spin loop hint
}
+ // Used by is_x86_feature_detected!();
+ "llvm.x86.xgetbv" => {
+ // FIXME use the actual xgetbv instruction
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ let v = v.load_scalar(fx);
+
+ // As of writing, only XCR0 exists
+ fx.bcx.ins().trapnz(v, TrapCode::UnreachableCodeReached);
+
+ let res = fx.bcx.ins().iconst(types::I64, 1 /* bit 0 must be set */);
+ ret.write_cvalue(fx, CValue::by_val(res, fx.layout_of(fx.tcx.types.i64)));
+ }
+
// Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
"llvm.x86.sse2.pmovmskb.128"
| "llvm.x86.avx2.pmovmskb"
@@ -53,7 +67,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
ret.write_cvalue(fx, res);
}
- "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+ "llvm.x86.sse.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
let (x, y, kind) = match args {
[x, y, kind] => (x, y, kind),
_ => bug!("wrong number of args for intrinsic {intrinsic}"),
@@ -66,18 +80,95 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
let flt_cc = match kind
.try_to_bits(Size::from_bytes(1))
.unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+ .try_into()
+ .unwrap()
{
- 0 => FloatCC::Equal,
- 1 => FloatCC::LessThan,
- 2 => FloatCC::LessThanOrEqual,
- 7 => FloatCC::Ordered,
- 3 => FloatCC::Unordered,
- 4 => FloatCC::NotEqual,
- 5 => FloatCC::UnorderedOrGreaterThanOrEqual,
- 6 => FloatCC::UnorderedOrGreaterThan,
+ _CMP_EQ_OQ | _CMP_EQ_OS => FloatCC::Equal,
+ _CMP_LT_OS | _CMP_LT_OQ => FloatCC::LessThan,
+ _CMP_LE_OS | _CMP_LE_OQ => FloatCC::LessThanOrEqual,
+ _CMP_UNORD_Q | _CMP_UNORD_S => FloatCC::Unordered,
+ _CMP_NEQ_UQ | _CMP_NEQ_US => FloatCC::NotEqual,
+ _CMP_NLT_US | _CMP_NLT_UQ => FloatCC::UnorderedOrGreaterThanOrEqual,
+ _CMP_NLE_US | _CMP_NLE_UQ => FloatCC::UnorderedOrGreaterThan,
+ _CMP_ORD_Q | _CMP_ORD_S => FloatCC::Ordered,
+ _CMP_EQ_UQ | _CMP_EQ_US => FloatCC::UnorderedOrEqual,
+ _CMP_NGE_US | _CMP_NGE_UQ => FloatCC::UnorderedOrLessThan,
+ _CMP_NGT_US | _CMP_NGT_UQ => FloatCC::UnorderedOrLessThanOrEqual,
+ _CMP_FALSE_OQ | _CMP_FALSE_OS => todo!(),
+ _CMP_NEQ_OQ | _CMP_NEQ_OS => FloatCC::OrderedNotEqual,
+ _CMP_GE_OS | _CMP_GE_OQ => FloatCC::GreaterThanOrEqual,
+ _CMP_GT_OS | _CMP_GT_OQ => FloatCC::GreaterThan,
+ _CMP_TRUE_UQ | _CMP_TRUE_US => todo!(),
+
kind => unreachable!("kind {:?}", kind),
};
+ // Copied from stdarch
+ /// Equal (ordered, non-signaling)
+ const _CMP_EQ_OQ: i32 = 0x00;
+ /// Less-than (ordered, signaling)
+ const _CMP_LT_OS: i32 = 0x01;
+ /// Less-than-or-equal (ordered, signaling)
+ const _CMP_LE_OS: i32 = 0x02;
+ /// Unordered (non-signaling)
+ const _CMP_UNORD_Q: i32 = 0x03;
+ /// Not-equal (unordered, non-signaling)
+ const _CMP_NEQ_UQ: i32 = 0x04;
+ /// Not-less-than (unordered, signaling)
+ const _CMP_NLT_US: i32 = 0x05;
+ /// Not-less-than-or-equal (unordered, signaling)
+ const _CMP_NLE_US: i32 = 0x06;
+ /// Ordered (non-signaling)
+ const _CMP_ORD_Q: i32 = 0x07;
+ /// Equal (unordered, non-signaling)
+ const _CMP_EQ_UQ: i32 = 0x08;
+ /// Not-greater-than-or-equal (unordered, signaling)
+ const _CMP_NGE_US: i32 = 0x09;
+ /// Not-greater-than (unordered, signaling)
+ const _CMP_NGT_US: i32 = 0x0a;
+ /// False (ordered, non-signaling)
+ const _CMP_FALSE_OQ: i32 = 0x0b;
+ /// Not-equal (ordered, non-signaling)
+ const _CMP_NEQ_OQ: i32 = 0x0c;
+ /// Greater-than-or-equal (ordered, signaling)
+ const _CMP_GE_OS: i32 = 0x0d;
+ /// Greater-than (ordered, signaling)
+ const _CMP_GT_OS: i32 = 0x0e;
+ /// True (unordered, non-signaling)
+ const _CMP_TRUE_UQ: i32 = 0x0f;
+ /// Equal (ordered, signaling)
+ const _CMP_EQ_OS: i32 = 0x10;
+ /// Less-than (ordered, non-signaling)
+ const _CMP_LT_OQ: i32 = 0x11;
+ /// Less-than-or-equal (ordered, non-signaling)
+ const _CMP_LE_OQ: i32 = 0x12;
+ /// Unordered (signaling)
+ const _CMP_UNORD_S: i32 = 0x13;
+ /// Not-equal (unordered, signaling)
+ const _CMP_NEQ_US: i32 = 0x14;
+ /// Not-less-than (unordered, non-signaling)
+ const _CMP_NLT_UQ: i32 = 0x15;
+ /// Not-less-than-or-equal (unordered, non-signaling)
+ const _CMP_NLE_UQ: i32 = 0x16;
+ /// Ordered (signaling)
+ const _CMP_ORD_S: i32 = 0x17;
+ /// Equal (unordered, signaling)
+ const _CMP_EQ_US: i32 = 0x18;
+ /// Not-greater-than-or-equal (unordered, non-signaling)
+ const _CMP_NGE_UQ: i32 = 0x19;
+ /// Not-greater-than (unordered, non-signaling)
+ const _CMP_NGT_UQ: i32 = 0x1a;
+ /// False (ordered, signaling)
+ const _CMP_FALSE_OS: i32 = 0x1b;
+ /// Not-equal (ordered, signaling)
+ const _CMP_NEQ_OS: i32 = 0x1c;
+ /// Greater-than-or-equal (ordered, non-signaling)
+ const _CMP_GE_OQ: i32 = 0x1d;
+ /// Greater-than (ordered, non-signaling)
+ const _CMP_GT_OQ: i32 = 0x1e;
+ /// True (unordered, signaling)
+ const _CMP_TRUE_US: i32 = 0x1f;
+
simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
let res_lane = match lane_ty.kind() {
ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
@@ -103,6 +194,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.sse2.psrai.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrai.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.sse2.pslli.d" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -137,6 +245,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.sse2.psrai.w" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrai.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.sse2.pslli.w" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -171,6 +296,57 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.avx.psrai.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.psrai.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.psrli.q" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.psrli.q imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 64 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.pslli.q" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.pslli.q imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 64 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
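A minimal scalar sketch (not part of the patch) of the per-lane rule the immediate logical shift arms above implement: x86 psrli/pslli zero the lane whenever the shift count is at least the lane width, which is what the `imm8 < 64` guards encode. The helper names below are hypothetical.

    // Hypothetical helpers mirroring the 64-bit logical shift lanes above.
    fn pslli_q_lane(lane: u64, imm8: u32) -> u64 {
        if imm8 < 64 { lane << imm8 } else { 0 }
    }

    fn psrli_q_lane(lane: u64, imm8: u32) -> u64 {
        if imm8 < 64 { lane >> imm8 } else { 0 }
    }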
"llvm.x86.avx.pslli.d" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -205,6 +381,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.avx2.psrai.w" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.avx2.psrai.w imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.avx2.pslli.w" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -313,7 +506,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
ret.place_lane(fx, 2).to_ptr().store(fx, res_2, MemFlags::trusted());
ret.place_lane(fx, 3).to_ptr().store(fx, res_3, MemFlags::trusted());
}
- "llvm.x86.sse2.storeu.dq" => {
+ "llvm.x86.sse2.storeu.dq" | "llvm.x86.sse2.storeu.pd" => {
intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
let mem_addr = mem_addr.load_scalar(fx);
@@ -321,17 +514,45 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
dest.write_cvalue(fx, a);
}
- "llvm.x86.addcarry.64" => {
+ "llvm.x86.ssse3.pabs.b.128" | "llvm.x86.ssse3.pabs.w.128" | "llvm.x86.ssse3.pabs.d.128" => {
+ let a = match args {
+ [a] => a,
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+ fx.bcx.ins().iabs(lane)
+ });
+ }
+ "llvm.x86.addcarry.32" | "llvm.x86.addcarry.64" => {
intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
let c_in = c_in.load_scalar(fx);
- llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+ let (cb_out, c) = llvm_add_sub(fx, BinOp::Add, c_in, a, b);
+
+ let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, a.layout().ty]));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
}
- "llvm.x86.subborrow.64" => {
+ "llvm.x86.addcarryx.u32" | "llvm.x86.addcarryx.u64" => {
+ intrinsic_args!(fx, args => (c_in, a, b, out); intrinsic);
+ let c_in = c_in.load_scalar(fx);
+
+ let (cb_out, c) = llvm_add_sub(fx, BinOp::Add, c_in, a, b);
+
+ Pointer::new(out.load_scalar(fx)).store(fx, c, MemFlags::trusted());
+ ret.write_cvalue(fx, CValue::by_val(cb_out, fx.layout_of(fx.tcx.types.u8)));
+ }
+ "llvm.x86.subborrow.32" | "llvm.x86.subborrow.64" => {
intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
let b_in = b_in.load_scalar(fx);
- llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+ let (cb_out, c) = llvm_add_sub(fx, BinOp::Sub, b_in, a, b);
+
+ let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, a.layout().ty]));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
}
_ => {
fx.tcx
@@ -356,21 +577,11 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
fn llvm_add_sub<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
- ret: CPlace<'tcx>,
cb_in: Value,
a: CValue<'tcx>,
b: CValue<'tcx>,
-) {
- assert_eq!(
- a.layout().ty,
- fx.tcx.types.u64,
- "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
- );
- assert_eq!(
- b.layout().ty,
- fx.tcx.types.u64,
- "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
- );
+) -> (Value, Value) {
+ assert_eq!(a.layout().ty, b.layout().ty);
// c + carry -> c + first intermediate carry or borrow respectively
let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
@@ -378,15 +589,14 @@ fn llvm_add_sub<'tcx>(
let cb0 = int0.value_field(fx, FieldIdx::new(1)).load_scalar(fx);
// c + carry -> c + second intermediate carry or borrow respectively
- let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
- let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
- let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+ let clif_ty = fx.clif_type(a.layout().ty).unwrap();
+ let cb_in_as_int = fx.bcx.ins().uextend(clif_ty, cb_in);
+ let cb_in_as_int = CValue::by_val(cb_in_as_int, fx.layout_of(a.layout().ty));
+ let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_int);
let (c, cb1) = int1.load_scalar_pair(fx);
// carry0 | carry1 -> carry or borrow respectively
let cb_out = fx.bcx.ins().bor(cb0, cb1);
- let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, fx.tcx.types.u64]));
- let val = CValue::by_val_pair(cb_out, c, layout);
- ret.write_cvalue(fx, val);
+ (cb_out, c)
}
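A minimal sketch (an assumption, not part of the patch) of the carry chain the refactored llvm_add_sub builds: two checked additions whose carry flags are OR-ed, which is sound because a + b + c_in can carry at most once in total. The subborrow arms follow the same shape with subtraction.

    // Scalar model of the addcarry lowering for u64 operands.
    fn addcarry_u64(c_in: u8, a: u64, b: u64) -> (u8, u64) {
        let (sum0, carry0) = a.overflowing_add(b);
        let (sum1, carry1) = sum0.overflowing_add(u64::from(c_in));
        (u8::from(carry0 | carry1), sum1)
    }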
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 5862f1829..36e9ba9c7 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -24,7 +24,7 @@ pub(crate) use llvm::codegen_llvm_intrinsic_call;
use rustc_middle::ty;
use rustc_middle::ty::layout::{HasParamEnv, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_span::symbol::{kw, sym, Symbol};
use crate::prelude::*;
@@ -213,13 +213,13 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
source_info: mir::SourceInfo,
) {
let intrinsic = fx.tcx.item_name(instance.def_id());
- let substs = instance.substs;
+ let instance_args = instance.args;
if intrinsic.as_str().starts_with("simd_") {
self::simd::codegen_simd_intrinsic_call(
fx,
intrinsic,
- substs,
+ instance_args,
args,
destination,
target.expect("target for simd intrinsic"),
@@ -233,7 +233,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
fx,
instance,
intrinsic,
- substs,
+ instance_args,
args,
destination,
target,
@@ -365,7 +365,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
instance: Instance<'tcx>,
intrinsic: Symbol,
- substs: SubstsRef<'tcx>,
+ generic_args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
destination: Option<BasicBlock>,
@@ -394,7 +394,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let dst = dst.load_scalar(fx);
let count = count.load_scalar(fx);
- let elem_ty = substs.type_at(0);
+ let elem_ty = generic_args.type_at(0);
let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
assert_eq!(args.len(), 3);
let byte_amount =
@@ -410,7 +410,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let src = src.load_scalar(fx);
let count = count.load_scalar(fx);
- let elem_ty = substs.type_at(0);
+ let elem_ty = generic_args.type_at(0);
let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
assert_eq!(args.len(), 3);
let byte_amount =
@@ -428,7 +428,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
sym::size_of_val => {
intrinsic_args!(fx, args => (ptr); intrinsic);
- let layout = fx.layout_of(substs.type_at(0));
+ let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
@@ -443,7 +443,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
sym::min_align_of_val => {
intrinsic_args!(fx, args => (ptr); intrinsic);
- let layout = fx.layout_of(substs.type_at(0));
+ let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
@@ -602,7 +602,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
intrinsic_args!(fx, args => (); intrinsic);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
let requirement = ValidityRequirement::from_intrinsic(intrinsic);
@@ -647,12 +647,13 @@ fn codegen_regular_intrinsic_call<'tcx>(
let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
ret.write_cvalue(fx, val);
}
- sym::volatile_store | sym::unaligned_volatile_store => {
+ sym::volatile_store | sym::unaligned_volatile_store | sym::nontemporal_store => {
intrinsic_args!(fx, args => (ptr, val); intrinsic);
let ptr = ptr.load_scalar(fx);
// Cranelift treats stores as volatile by default
// FIXME correctly handle unaligned_volatile_store
+ // FIXME actually do nontemporal stores if requested
let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
dest.write_cvalue(fx, val);
}
@@ -674,7 +675,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (ptr, base); intrinsic);
let ptr = ptr.load_scalar(fx);
let base = base.load_scalar(fx);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
let pointee_size: u64 = fx.layout_of(ty).size.bytes();
let diff_bytes = fx.bcx.ins().isub(ptr, base);
@@ -720,7 +721,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (ptr); intrinsic);
let ptr = ptr.load_scalar(fx);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
match ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
// FIXME implement 128bit atomics
@@ -751,7 +752,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (ptr, val); intrinsic);
let ptr = ptr.load_scalar(fx);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
match ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
// FIXME implement 128bit atomics
@@ -1128,7 +1129,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let lhs_ref = lhs_ref.load_scalar(fx);
let rhs_ref = rhs_ref.load_scalar(fx);
- let size = fx.layout_of(substs.type_at(0)).layout.size();
+ let size = fx.layout_of(generic_args.type_at(0)).layout.size();
// FIXME add and use emit_small_memcmp
let is_eq_value = if size == Size::ZERO {
// No bytes means they're trivially equal
@@ -1154,6 +1155,20 @@ fn codegen_regular_intrinsic_call<'tcx>(
ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
}
+ sym::compare_bytes => {
+ intrinsic_args!(fx, args => (lhs_ptr, rhs_ptr, bytes_val); intrinsic);
+ let lhs_ptr = lhs_ptr.load_scalar(fx);
+ let rhs_ptr = rhs_ptr.load_scalar(fx);
+ let bytes_val = bytes_val.load_scalar(fx);
+
+ let params = vec![AbiParam::new(fx.pointer_type); 3];
+ let returns = vec![AbiParam::new(types::I32)];
+ let args = &[lhs_ptr, rhs_ptr, bytes_val];
+ // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+ let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+ ret.write_cvalue(fx, CValue::by_val(cmp, ret.layout()));
+ }
+
sym::const_allocate => {
intrinsic_args!(fx, args => (_size, _align); intrinsic);
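A minimal sketch (an assumption) of the semantics the new compare_bytes arm delegates to the memcmp lib call: a lexicographic byte comparison returning a negative, zero, or positive i32, with a zero-length comparison reporting equality.

    // Scalar model of compare_bytes; only the sign of the result is significant.
    fn compare_bytes_model(lhs: &[u8], rhs: &[u8], n: usize) -> i32 {
        for i in 0..n {
            let d = i32::from(lhs[i]) - i32::from(rhs[i]);
            if d != 0 {
                return d;
            }
        }
        0
    }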
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 6741362e8..9863e40b5 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -1,6 +1,6 @@
//! Codegen `extern "platform-intrinsic"` intrinsics.
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_span::Symbol;
use rustc_target::abi::Endian;
@@ -21,7 +21,7 @@ fn report_simd_type_validation_error(
pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: Symbol,
- _substs: SubstsRef<'tcx>,
+ _args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: BasicBlock,
@@ -117,8 +117,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
});
}
- // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
- _ if intrinsic.as_str().starts_with("simd_shuffle") => {
+ // simd_shuffle<T, I, U>(x: T, y: T, idx: I) -> U
+ sym::simd_shuffle => {
let (x, y, idx) = match args {
[x, y, idx] => (x, y, idx),
_ => {
@@ -133,36 +133,26 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
return;
}
- // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
- // If there is no suffix, use the index array length.
- let n: u16 = if intrinsic == sym::simd_shuffle {
- // Make sure this is actually an array, since typeck only checks the length-suffixed
- // version of this intrinsic.
- let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
- match idx_ty.kind() {
- ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
- .try_eval_target_usize(fx.tcx, ty::ParamEnv::reveal_all())
- .unwrap_or_else(|| {
- span_bug!(span, "could not evaluate shuffle index array length")
- })
- .try_into()
- .unwrap(),
- _ => {
- fx.tcx.sess.span_err(
- span,
- format!(
- "simd_shuffle index must be an array of `u32`, got `{}`",
- idx_ty,
- ),
- );
- // Prevent verifier error
- fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
- return;
- }
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
+ let n: u16 = match idx_ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
+ .try_eval_target_usize(fx.tcx, ty::ParamEnv::reveal_all())
+ .unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ .try_into()
+ .unwrap(),
+ _ => {
+ fx.tcx.sess.span_err(
+ span,
+ format!("simd_shuffle index must be an array of `u32`, got `{}`", idx_ty),
+ );
+ // Prevent verifier error
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
}
- } else {
- // FIXME remove this case
- intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
};
assert_eq!(x.layout(), y.layout());
@@ -179,7 +169,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let indexes = {
use rustc_middle::mir::interpret::*;
let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
- .expect("simd_shuffle* idx not const");
+ .expect("simd_shuffle idx not const");
let idx_bytes = match idx_const {
ConstValue::ByRef { alloc, offset } => {
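A minimal sketch (an assumption) of the simd_shuffle semantics the rewritten arm relies on: the [u32; N] index array fixes the output lane count, and each index selects a lane from the concatenation of x and y. The function below is a hypothetical 4-lane model, not compiler code.

    // Scalar model for 4-lane inputs; indices >= 4 pick from the second vector.
    fn simd_shuffle4(x: [u32; 4], y: [u32; 4], idx: [u32; 4]) -> [u32; 4] {
        let mut out = [0u32; 4];
        for (o, &i) in out.iter_mut().zip(idx.iter()) {
            let i = i as usize;
            *o = if i < 4 { x[i] } else { y[i - 4] };
        }
        out
    }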
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index 0de2dccda..d01ded8ab 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -260,6 +260,13 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Arc<dyn isa::Tar
flags_builder.set("enable_verifier", enable_verifier).unwrap();
flags_builder.set("regalloc_checker", enable_verifier).unwrap();
+ let preserve_frame_pointer = sess.target.options.frame_pointer
+ != rustc_target::spec::FramePointer::MayOmit
+ || matches!(sess.opts.cg.force_frame_pointers, Some(true));
+ if preserve_frame_pointer {
+ flags_builder.set("preserve_frame_pointers", "true").unwrap();
+ }
+
let tls_model = match target_triple.binary_format {
BinaryFormat::Elf => "elf_gd",
BinaryFormat::Macho => "macho",
@@ -268,8 +275,6 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Arc<dyn isa::Tar
};
flags_builder.set("tls_model", tls_model).unwrap();
- flags_builder.set("enable_simd", "true").unwrap();
-
flags_builder.set("enable_llvm_abi_extensions", "true").unwrap();
use rustc_session::config::OptLevel;
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
index 20ba73f38..b5efe44d8 100644
--- a/compiler/rustc_codegen_cranelift/src/main_shim.rs
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -1,6 +1,6 @@
use rustc_hir::LangItem;
-use rustc_middle::ty::subst::GenericArg;
use rustc_middle::ty::AssocKind;
+use rustc_middle::ty::GenericArg;
use rustc_session::config::{sigpipe, EntryFnType};
use rustc_span::symbol::Ident;
@@ -119,7 +119,7 @@ pub(crate) fn maybe_create_entry_wrapper(
tcx,
ParamEnv::reveal_all(),
report.def_id,
- tcx.mk_substs(&[GenericArg::from(main_ret_ty)]),
+ tcx.mk_args(&[GenericArg::from(main_ret_ty)]),
)
.unwrap()
.unwrap()
@@ -146,7 +146,7 @@ pub(crate) fn maybe_create_entry_wrapper(
tcx,
ParamEnv::reveal_all(),
start_def_id,
- tcx.mk_substs(&[main_ret_ty.into()]),
+ tcx.mk_args(&[main_ret_ty.into()]),
)
.unwrap()
.unwrap()
diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
index 5a4f9e804..0ead50c34 100644
--- a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
+++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
@@ -9,7 +9,7 @@
//!
//! function u0:22(i64) -> i8, i8 system_v {
//! ; symbol _ZN97_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$RF$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17hd517c453d67c0915E
-//! ; instance Instance { def: Item(WithOptConstParam { did: DefId(0:42 ~ example[4e51]::{impl#0}::call_once), const_param_did: None }), substs: [ReErased, ReErased] }
+//! ; instance Instance { def: Item(WithOptConstParam { did: DefId(0:42 ~ example[4e51]::{impl#0}::call_once), const_param_did: None }), args: [ReErased, ReErased] }
//! ; abi FnAbi { args: [ArgAbi { layout: TyAndLayout { ty: IsNotEmpty, layout: Layout { size: Size(0 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, abi: Aggregate { sized: true }, fields: Arbitrary { offsets: [], memory_index: [] }, largest_niche: None, variants: Single { index: 0 } } }, mode: Ignore }, ArgAbi { layout: TyAndLayout { ty: &&[u16], layout: Layout { size: Size(8 bytes), align: AbiAndPrefAlign { abi: Align(8 bytes), pref: Align(8 bytes) }, abi: Scalar(Initialized { value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), fields: Primitive, largest_niche: Some(Niche { offset: Size(0 bytes), value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), variants: Single { index: 0 } } }, mode: Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(8 bytes)) }) }], ret: ArgAbi { layout: TyAndLayout { ty: (u8, u8), layout: Layout { size: Size(2 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, abi: ScalarPair(Initialized { value: Int(I8, false), valid_range: 0..=255 }, Initialized { value: Int(I8, false), valid_range: 0..=255 }), fields: Arbitrary { offsets: [Size(0 bytes), Size(1 bytes)], memory_index: [0, 1] }, largest_niche: None, variants: Single { index: 0 } } }, mode: Pair(ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }, ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }) }, c_variadic: false, fixed_count: 1, conv: Rust, can_unwind: false }
//!
//! ; kind loc.idx param pass mode ty
@@ -25,7 +25,7 @@
//!
//! ss0 = explicit_slot 16
//! sig0 = (i64, i64) -> i8, i8 system_v
-//! fn0 = colocated u0:23 sig0 ; Instance { def: Item(WithOptConstParam { did: DefId(0:46 ~ example[4e51]::{impl#1}::call_mut), const_param_did: None }), substs: [ReErased, ReErased] }
+//! fn0 = colocated u0:23 sig0 ; Instance { def: Item(WithOptConstParam { did: DefId(0:46 ~ example[4e51]::{impl#1}::call_mut), const_param_did: None }), args: [ReErased, ReErased] }
//!
//! block0(v0: i64):
//! nop
@@ -261,7 +261,7 @@ pub(crate) fn write_clif_file(
impl fmt::Debug for FunctionCx<'_, '_, '_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- writeln!(f, "{:?}", self.instance.substs)?;
+ writeln!(f, "{:?}", self.instance.args)?;
writeln!(f, "{:?}", self.local_map)?;
let mut clif = String::new();
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index 133c989b6..ff95141ce 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -2,6 +2,8 @@
use crate::prelude::*;
+use rustc_middle::ty::FnSig;
+
use cranelift_codegen::entity::EntityRef;
use cranelift_codegen::ir::immediates::Offset32;
@@ -160,6 +162,7 @@ impl<'tcx> CValue<'tcx> {
}
/// Load a value with layout.abi of scalar
+ #[track_caller]
pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
let layout = self.1;
match self.0 {
@@ -182,6 +185,7 @@ impl<'tcx> CValue<'tcx> {
}
/// Load a value pair with layout.abi of scalar pair
+ #[track_caller]
pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
let layout = self.1;
match self.0 {
@@ -583,17 +587,25 @@ impl<'tcx> CPlace<'tcx> {
let dst_layout = self.layout();
match self.inner {
CPlaceInner::Var(_local, var) => {
- let data = CValue(from.0, dst_layout).load_scalar(fx);
+ let data = match from.1.abi {
+ Abi::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx),
+ _ => {
+ let (ptr, meta) = from.force_stack(fx);
+ assert!(meta.is_none());
+ CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar(fx)
+ }
+ };
let dst_ty = fx.clif_type(self.layout().ty).unwrap();
transmute_scalar(fx, var, data, dst_ty);
}
CPlaceInner::VarPair(_local, var1, var2) => {
- let (data1, data2) = if from.layout().ty == dst_layout.ty {
- CValue(from.0, dst_layout).load_scalar_pair(fx)
- } else {
- let (ptr, meta) = from.force_stack(fx);
- assert!(meta.is_none());
- CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx)
+ let (data1, data2) = match from.1.abi {
+ Abi::ScalarPair(_, _) => CValue(from.0, dst_layout).load_scalar_pair(fx),
+ _ => {
+ let (ptr, meta) = from.force_stack(fx);
+ assert!(meta.is_none());
+ CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx)
+ }
};
let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
transmute_scalar(fx, var1, data1, dst_ty1);
@@ -607,30 +619,38 @@ impl<'tcx> CPlace<'tcx> {
let mut flags = MemFlags::new();
flags.set_notrap();
- match from.layout().abi {
- Abi::Scalar(_) => {
- let val = from.load_scalar(fx);
- to_ptr.store(fx, val, flags);
- return;
- }
- Abi::ScalarPair(a_scalar, b_scalar) => {
- let (value, extra) = from.load_scalar_pair(fx);
- let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
- to_ptr.store(fx, value, flags);
- to_ptr.offset(fx, b_offset).store(fx, extra, flags);
- return;
- }
- _ => {}
- }
match from.0 {
CValueInner::ByVal(val) => {
to_ptr.store(fx, val, flags);
}
- CValueInner::ByValPair(_, _) => {
- bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
- }
+ CValueInner::ByValPair(val1, val2) => match from.layout().abi {
+ Abi::ScalarPair(a_scalar, b_scalar) => {
+ let b_offset =
+ scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ to_ptr.store(fx, val1, flags);
+ to_ptr.offset(fx, b_offset).store(fx, val2, flags);
+ }
+ _ => bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi),
+ },
CValueInner::ByRef(from_ptr, None) => {
+ match from.layout().abi {
+ Abi::Scalar(_) => {
+ let val = from.load_scalar(fx);
+ to_ptr.store(fx, val, flags);
+ return;
+ }
+ Abi::ScalarPair(a_scalar, b_scalar) => {
+ let b_offset =
+ scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ let (val1, val2) = from.load_scalar_pair(fx);
+ to_ptr.store(fx, val1, flags);
+ to_ptr.offset(fx, b_offset).store(fx, val2, flags);
+ return;
+ }
+ _ => {}
+ }
+
let from_addr = from_ptr.get_addr(fx);
let to_addr = to_ptr.get_addr(fx);
let src_layout = from.1;
@@ -815,11 +835,42 @@ pub(crate) fn assert_assignable<'tcx>(
ParamEnv::reveal_all(),
from_ty.fn_sig(fx.tcx),
);
+ let FnSig {
+ inputs_and_output: types_from,
+ c_variadic: c_variadic_from,
+ unsafety: unsafety_from,
+ abi: abi_from,
+ } = from_sig;
let to_sig = fx
.tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
+ let FnSig {
+ inputs_and_output: types_to,
+ c_variadic: c_variadic_to,
+ unsafety: unsafety_to,
+ abi: abi_to,
+ } = to_sig;
+ let mut types_from = types_from.iter();
+ let mut types_to = types_to.iter();
+ loop {
+ match (types_from.next(), types_to.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => break,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ assert_eq!(
+ c_variadic_from, c_variadic_to,
+ "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+ from_sig, to_sig, fx,
+ );
+ assert_eq!(
+ unsafety_from, unsafety_to,
+ "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+ from_sig, to_sig, fx,
+ );
assert_eq!(
- from_sig, to_sig,
+ abi_from, abi_to,
"Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
from_sig, to_sig, fx,
);
@@ -850,11 +901,11 @@ pub(crate) fn assert_assignable<'tcx>(
}
}
}
- (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
+ (&ty::Adt(adt_def_a, args_a), &ty::Adt(adt_def_b, args_b))
if adt_def_a.did() == adt_def_b.did() =>
{
- let mut types_a = substs_a.types();
- let mut types_b = substs_b.types();
+ let mut types_a = args_a.types();
+ let mut types_b = args_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
@@ -864,11 +915,11 @@ pub(crate) fn assert_assignable<'tcx>(
}
}
(ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
- (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
+ (&ty::Closure(def_id_a, args_a), &ty::Closure(def_id_b, args_b))
if def_id_a == def_id_b =>
{
- let mut types_a = substs_a.types();
- let mut types_b = substs_b.types();
+ let mut types_a = args_a.types();
+ let mut types_b = args_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
diff --git a/compiler/rustc_codegen_gcc/example/alloc_system.rs b/compiler/rustc_codegen_gcc/example/alloc_system.rs
index 046903fe5..3deef419f 100644
--- a/compiler/rustc_codegen_gcc/example/alloc_system.rs
+++ b/compiler/rustc_codegen_gcc/example/alloc_system.rs
@@ -10,13 +10,16 @@
#[cfg(any(target_arch = "x86",
target_arch = "arm",
target_arch = "mips",
+ target_arch = "mips32r6",
target_arch = "powerpc",
+          target_arch = "csky",
target_arch = "powerpc64"))]
const MIN_ALIGN: usize = 8;
#[cfg(any(target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "loongarch64",
target_arch = "mips64",
+ target_arch = "mips64r6",
target_arch = "s390x",
target_arch = "sparc64"))]
const MIN_ALIGN: usize = 16;
diff --git a/compiler/rustc_codegen_gcc/messages.ftl b/compiler/rustc_codegen_gcc/messages.ftl
index 97bc8ef9d..2fd0daee3 100644
--- a/compiler/rustc_codegen_gcc/messages.ftl
+++ b/compiler/rustc_codegen_gcc/messages.ftl
@@ -1,63 +1,6 @@
codegen_gcc_invalid_minimum_alignment =
invalid minimum global alignment: {$err}
-codegen_gcc_invalid_monomorphization_basic_integer =
- invalid monomorphization of `{$name}` intrinsic: expected basic integer type, found `{$ty}`
-
-codegen_gcc_invalid_monomorphization_expected_signed_unsigned =
- invalid monomorphization of `{$name}` intrinsic: expected element type `{$elem_ty}` of vector type `{$vec_ty}` to be a signed or unsigned integer type
-
-codegen_gcc_invalid_monomorphization_expected_simd =
- invalid monomorphization of `{$name}` intrinsic: expected SIMD {$expected_ty} type, found non-SIMD `{$found_ty}`
-
-codegen_gcc_invalid_monomorphization_inserted_type =
- invalid monomorphization of `{$name}` intrinsic: expected inserted type `{$in_elem}` (element of input `{$in_ty}`), found `{$out_ty}`
-
-codegen_gcc_invalid_monomorphization_invalid_bitmask =
- invalid monomorphization of `{$name}` intrinsic: invalid bitmask `{$ty}`, expected `u{$expected_int_bits}` or `[u8; {$expected_bytes}]`
-
-codegen_gcc_invalid_monomorphization_invalid_float_vector =
- invalid monomorphization of `{$name}` intrinsic: unsupported element type `{$elem_ty}` of floating-point vector `{$vec_ty}`
-
-codegen_gcc_invalid_monomorphization_mask_type =
- invalid monomorphization of `{$name}` intrinsic: mask element type is `{$ty}`, expected `i_`
-
-codegen_gcc_invalid_monomorphization_mismatched_lengths =
- invalid monomorphization of `{$name}` intrinsic: mismatched lengths: mask length `{$m_len}` != other vector length `{$v_len}`
-
-codegen_gcc_invalid_monomorphization_not_float =
- invalid monomorphization of `{$name}` intrinsic: `{$ty}` is not a floating-point type
-
-codegen_gcc_invalid_monomorphization_return_element =
- invalid monomorphization of `{$name}` intrinsic: expected return element type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}` with element type `{$out_ty}`
-
-codegen_gcc_invalid_monomorphization_return_integer_type =
- invalid monomorphization of `{$name}` intrinsic: expected return type with integer elements, found `{$ret_ty}` with non-integer `{$out_ty}`
-
-codegen_gcc_invalid_monomorphization_return_length =
- invalid monomorphization of `{$name}` intrinsic: expected return type of length {$in_len}, found `{$ret_ty}` with length {$out_len}
-
-codegen_gcc_invalid_monomorphization_return_length_input_type =
- invalid monomorphization of `{$name}` intrinsic: expected return type with length {$in_len} (same as input type `{$in_ty}`), found `{$ret_ty}` with length {$out_len}
-
-codegen_gcc_invalid_monomorphization_return_type =
- invalid monomorphization of `{$name}` intrinsic: expected return type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}`
-
-codegen_gcc_invalid_monomorphization_simd_shuffle =
- invalid monomorphization of `{$name}` intrinsic: simd_shuffle index must be an array of `u32`, got `{$ty}`
-
-codegen_gcc_invalid_monomorphization_unrecognized =
- invalid monomorphization of `{$name}` intrinsic: unrecognized intrinsic `{$name}`
-
-codegen_gcc_invalid_monomorphization_unsupported_cast =
- invalid monomorphization of `{$name}` intrinsic: unsupported cast from `{$in_ty}` with element `{$in_elem}` to `{$ret_ty}` with element `{$out_elem}`
-
-codegen_gcc_invalid_monomorphization_unsupported_element =
- invalid monomorphization of `{$name}` intrinsic: unsupported {$name} from `{$in_ty}` with element `{$elem_ty}` to `{$ret_ty}`
-
-codegen_gcc_invalid_monomorphization_unsupported_operation =
- invalid monomorphization of `{$name}` intrinsic: unsupported operation on `{$in_ty}` with element `{$in_elem}`
-
codegen_gcc_lto_not_supported =
LTO is not supported. You may get a linker error.
diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
index 13f88192b..edd7ab722 100644
--- a/compiler/rustc_codegen_gcc/src/allocator.rs
+++ b/compiler/rustc_codegen_gcc/src/allocator.rs
@@ -27,8 +27,8 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam
if kind == AllocatorKind::Default {
for method in ALLOCATOR_METHODS {
let mut types = Vec::with_capacity(method.inputs.len());
- for ty in method.inputs.iter() {
- match *ty {
+ for input in method.inputs.iter() {
+ match input.ty {
AllocatorTy::Layout => {
types.push(usize);
types.push(usize);
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
index 4c3b7f503..905fdac92 100644
--- a/compiler/rustc_codegen_gcc/src/asm.rs
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -107,7 +107,7 @@ enum ConstraintOrRegister {
impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
- fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], _instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
+ fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
if options.contains(InlineAsmOptions::MAY_UNWIND) {
self.sess()
.create_err(UnwindingInlineAsm { span: span[0] })
@@ -173,7 +173,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
.any(|&(_, feature)| {
if let Some(feature) = feature {
- self.tcx.sess.target_features.contains(&feature)
+ self.tcx.asm_target_features(instance.def_id()).contains(&feature)
} else {
true // Register class is unconditionally supported
}
@@ -597,6 +597,8 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
+ InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r"
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
@@ -673,6 +675,8 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
+ InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
InlineAsmRegClass::Msp430(_) => unimplemented!(),
@@ -860,6 +864,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
InlineAsmRegClass::S390x(_) => None,
InlineAsmRegClass::Msp430(_) => None,
InlineAsmRegClass::M68k(_) => None,
+ InlineAsmRegClass::CSKY(_) => None,
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
}
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
index dcd560b3d..9e614ca4a 100644
--- a/compiler/rustc_codegen_gcc/src/base.rs
+++ b/compiler/rustc_codegen_gcc/src/base.rs
@@ -159,8 +159,8 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i
let cx = CodegenCx::new(&context, cgu, tcx, supports_128bit_integers);
let mono_items = cgu.items_in_deterministic_order(tcx);
- for &(mono_item, (linkage, visibility)) in &mono_items {
- mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+ for &(mono_item, data) in &mono_items {
+ mono_item.predefine::<Builder<'_, '_, '_>>(&cx, data.linkage, data.visibility);
}
// ... and now that we have everything pre-defined, fill out those definitions.
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 43d0aafbd..0b1f2fe6a 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -27,7 +27,6 @@ use rustc_codegen_ssa::traits::{
BaseTypeMethods,
BuilderMethods,
ConstMethods,
- DerivedTypeMethods,
LayoutTypeMethods,
HasCodegen,
OverflowOp,
diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs
index 433b2585f..a96bd66ba 100644
--- a/compiler/rustc_codegen_gcc/src/callee.rs
+++ b/compiler/rustc_codegen_gcc/src/callee.rs
@@ -17,8 +17,8 @@ use crate::context::CodegenCx;
pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> Function<'gcc> {
let tcx = cx.tcx();
- assert!(!instance.substs.has_infer());
- assert!(!instance.substs.has_escaping_bound_vars());
+ assert!(!instance.args.has_infer());
+ assert!(!instance.args.has_escaping_bound_vars());
let sym = tcx.symbol_name(instance).name;
@@ -100,7 +100,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
// whether we are sharing generics or not. The important thing here is
// that the visibility we apply to the declaration is the same one that
// has been applied to the definition (wherever that definition may be).
- let is_generic = instance.substs.non_erasable_generics().next().is_some();
+ let is_generic = instance.args.non_erasable_generics().next().is_some();
if is_generic {
// This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index b62f4676f..5f54cb16d 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -16,6 +16,10 @@ use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
+ self.context.new_cast(None, val, ty)
+ }
+
pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> {
bytes_in_context(self, bytes)
}
@@ -242,10 +246,6 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
const_alloc_to_gcc(self, alloc)
}
- fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
- self.context.new_cast(None, val, ty)
- }
-
fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
if value.get_type() == self.bool_type.make_pointer() {
if let Some(pointee) = typ.get_pointee() {
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
index 08507e196..88dcafa73 100644
--- a/compiler/rustc_codegen_gcc/src/context.rs
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -476,7 +476,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
#[inline]
fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
- if let LayoutError::SizeOverflow(_) = err {
+ if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
self.sess().emit_fatal(respan(span, err.into_diagnostic()))
} else {
span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs
index 9305bd1e0..693367192 100644
--- a/compiler/rustc_codegen_gcc/src/errors.rs
+++ b/compiler/rustc_codegen_gcc/src/errors.rs
@@ -1,7 +1,6 @@
use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg};
use rustc_macros::Diagnostic;
-use rustc_middle::ty::Ty;
-use rustc_span::{Span, Symbol};
+use rustc_span::Span;
use std::borrow::Cow;
struct ExitCode(Option<i32>);
@@ -17,201 +16,6 @@ impl IntoDiagnosticArg for ExitCode {
}
#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_basic_integer, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationBasicInteger<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_invalid_float_vector, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationInvalidFloatVector<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub elem_ty: &'a str,
- pub vec_ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_not_float, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationNotFloat<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_unrecognized, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationUnrecognized {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_expected_signed_unsigned, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationExpectedSignedUnsigned<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub elem_ty: Ty<'a>,
- pub vec_ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_unsupported_element, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationUnsupportedElement<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub in_ty: Ty<'a>,
- pub elem_ty: Ty<'a>,
- pub ret_ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_invalid_bitmask, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationInvalidBitmask<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub ty: Ty<'a>,
- pub expected_int_bits: u64,
- pub expected_bytes: u64,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_simd_shuffle, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationSimdShuffle<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_expected_simd, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationExpectedSimd<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub position: &'a str,
- pub found_ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_mask_type, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationMaskType<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_return_length, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationReturnLength<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub in_len: u64,
- pub ret_ty: Ty<'a>,
- pub out_len: u64,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_return_length_input_type, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationReturnLengthInputType<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub in_len: u64,
- pub in_ty: Ty<'a>,
- pub ret_ty: Ty<'a>,
- pub out_len: u64,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_return_element, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationReturnElement<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub in_elem: Ty<'a>,
- pub in_ty: Ty<'a>,
- pub ret_ty: Ty<'a>,
- pub out_ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_return_type, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationReturnType<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub in_elem: Ty<'a>,
- pub in_ty: Ty<'a>,
- pub ret_ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_inserted_type, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationInsertedType<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub in_elem: Ty<'a>,
- pub in_ty: Ty<'a>,
- pub out_ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_return_integer_type, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationReturnIntegerType<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub ret_ty: Ty<'a>,
- pub out_ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_mismatched_lengths, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationMismatchedLengths {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub m_len: u64,
- pub v_len: u64,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_unsupported_cast, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationUnsupportedCast<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub in_ty: Ty<'a>,
- pub in_elem: Ty<'a>,
- pub ret_ty: Ty<'a>,
- pub out_elem: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_gcc_invalid_monomorphization_unsupported_operation, code = "E0511")]
-pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
- pub in_ty: Ty<'a>,
- pub in_elem: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
#[diag(codegen_gcc_lto_not_supported)]
pub(crate) struct LTONotSupported;
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index 0b208be4e..f8c32c6db 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -12,7 +12,8 @@ use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
#[cfg(feature="master")]
-use rustc_codegen_ssa::traits::{DerivedTypeMethods, MiscMethods};
+use rustc_codegen_ssa::traits::MiscMethods;
+use rustc_codegen_ssa::errors::InvalidMonomorphization;
use rustc_middle::bug;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::ty::layout::LayoutOf;
@@ -31,7 +32,6 @@ use crate::abi::FnAbiGccExt;
use crate::builder::Builder;
use crate::common::{SignType, TypeReflection};
use crate::context::CodegenCx;
-use crate::errors::InvalidMonomorphizationBasicInteger;
use crate::type_of::LayoutGccExt;
use crate::intrinsic::simd::generic_simd_intrinsic;
@@ -92,8 +92,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
let tcx = self.tcx;
let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
- let (def_id, substs) = match *callee_ty.kind() {
- ty::FnDef(def_id, substs) => (def_id, substs),
+ let (def_id, fn_args) = match *callee_ty.kind() {
+ ty::FnDef(def_id, fn_args) => (def_id, fn_args),
_ => bug!("expected fn item type, found {}", callee_ty),
};
@@ -142,7 +142,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
}
sym::volatile_load | sym::unaligned_volatile_load => {
- let tp_ty = substs.type_at(0);
+ let tp_ty = fn_args.type_at(0);
let mut ptr = args[0].immediate();
if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
@@ -256,7 +256,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
_ => bug!(),
},
None => {
- tcx.sess.emit_err(InvalidMonomorphizationBasicInteger { span, name, ty });
+ tcx.sess.emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
return;
}
}
@@ -264,7 +264,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
sym::raw_eq => {
use rustc_target::abi::Abi::*;
- let tp_ty = substs.type_at(0);
+ let tp_ty = fn_args.type_at(0);
let layout = self.layout_of(tp_ty).layout;
let _use_integer_compare = match layout.abi() {
Scalar(_) | ScalarPair(_, _) => true,
@@ -302,6 +302,21 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
}
}
+ sym::compare_bytes => {
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ let n = args[2].immediate();
+
+ let void_ptr_type = self.context.new_type::<*const ()>();
+ let a_ptr = self.bitcast(a, void_ptr_type);
+ let b_ptr = self.bitcast(b, void_ptr_type);
+
+ // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+ let builtin = self.context.get_builtin_function("memcmp");
+ let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+ self.sext(cmp, self.type_ix(32))
+ }
+
sym::black_box => {
args[0].val.store(self, result);
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index 9115cf971..85d3e7234 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -1,11 +1,11 @@
-#[cfg(feature="master")]
-use gccjit::{ComparisonOp, UnaryOp};
use gccjit::ToRValue;
use gccjit::{BinaryOp, RValue, Type};
+#[cfg(feature = "master")]
+use gccjit::{ComparisonOp, UnaryOp};
use rustc_codegen_ssa::base::compare_simd_types;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
use rustc_codegen_ssa::errors::ExpectedPointerMutability;
use rustc_codegen_ssa::errors::InvalidMonomorphization;
use rustc_codegen_ssa::mir::operand::OperandRef;
@@ -19,21 +19,8 @@ use rustc_span::{sym, Span, Symbol};
use rustc_target::abi::Align;
use crate::builder::Builder;
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
use crate::context::CodegenCx;
-#[cfg(feature="master")]
-use crate::errors::{InvalidMonomorphizationExpectedSignedUnsigned, InvalidMonomorphizationInsertedType};
-use crate::errors::{
- InvalidMonomorphizationExpectedSimd,
- InvalidMonomorphizationInvalidBitmask,
- InvalidMonomorphizationInvalidFloatVector, InvalidMonomorphizationMaskType,
- InvalidMonomorphizationMismatchedLengths, InvalidMonomorphizationNotFloat,
- InvalidMonomorphizationReturnElement, InvalidMonomorphizationReturnIntegerType,
- InvalidMonomorphizationReturnLength, InvalidMonomorphizationReturnLengthInputType,
- InvalidMonomorphizationReturnType, InvalidMonomorphizationSimdShuffle,
- InvalidMonomorphizationUnrecognized, InvalidMonomorphizationUnsupportedElement,
- InvalidMonomorphizationUnsupportedOperation,
-};
pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
bx: &mut Builder<'a, 'gcc, 'tcx>,
@@ -59,16 +46,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
};
}
macro_rules! require_simd {
- ($ty: expr, $position: expr) => {
- require!(
- $ty.is_simd(),
- InvalidMonomorphizationExpectedSimd {
- span,
- name,
- position: $position,
- found_ty: $ty
- }
- )
+ ($ty: expr, $diag: expr) => {
+ require!($ty.is_simd(), $diag)
};
}
@@ -78,7 +57,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let arg_tys = sig.inputs();
if name == sym::simd_select_bitmask {
- require_simd!(arg_tys[1], "argument");
+ require_simd!(
+ arg_tys[1],
+ InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+ );
let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let expected_int_bits = (len.max(8) - 1).next_power_of_two();
@@ -99,10 +81,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
bx.load(int_ty, ptr, Align::ONE)
}
- _ => return_error!(InvalidMonomorphizationInvalidBitmask {
+ _ => return_error!(InvalidMonomorphization::InvalidBitmask {
span,
name,
- ty: mask_ty,
+ mask_ty,
expected_int_bits,
expected_bytes
}),
@@ -116,7 +98,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// NOTE: since the arguments can be vectors of floats, make sure the mask is a vector of
// integer.
let mask_element_type = bx.type_ix(arg1_element_type.get_size() as u64 * 8);
- let vector_mask_type = bx.context.new_vector_type(mask_element_type, arg1_vector_type.get_num_units() as u64);
+ let vector_mask_type =
+ bx.context.new_vector_type(mask_element_type, arg1_vector_type.get_num_units() as u64);
let mut elements = vec![];
let one = bx.context.new_rvalue_one(mask.get_type());
@@ -131,7 +114,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
}
// every intrinsic below takes a SIMD vector as its first argument
- require_simd!(arg_tys[0], "input");
+ require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] });
let in_ty = arg_tys[0];
let comparison = match name {
@@ -146,12 +129,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
if let Some(cmp_op) = comparison {
- require_simd!(ret_ty, "return");
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
- InvalidMonomorphizationReturnLengthInputType {
+ InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
@@ -162,7 +145,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
);
require!(
bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
- InvalidMonomorphizationReturnIntegerType { span, name, ret_ty, out_ty }
+ InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
);
let arg1 = args[0].immediate();
@@ -170,48 +153,34 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// compare them as equal, so bitcast.
// FIXME(antoyo): allow comparing vector types as equal in libgccjit.
let arg2 = bx.context.new_bitcast(None, args[1].immediate(), arg1.get_type());
- return Ok(compare_simd_types(
- bx,
- arg1,
- arg2,
- in_elem,
- llret_ty,
- cmp_op,
- ));
+ return Ok(compare_simd_types(bx, arg1, arg2, in_elem, llret_ty, cmp_op));
}
- if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") {
- let n: u64 = if stripped.is_empty() {
- // Make sure this is actually an array, since typeck only checks the length-suffixed
- // version of this intrinsic.
- match args[2].layout.ty.kind() {
- ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
- len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
- || span_bug!(span, "could not evaluate shuffle index array length"),
- )
- }
- _ => return_error!(InvalidMonomorphizationSimdShuffle {
- span,
- name,
- ty: args[2].layout.ty
- }),
+ if name == sym::simd_shuffle {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let n: u64 = match args[2].layout.ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+ len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
+ || span_bug!(span, "could not evaluate shuffle index array length"),
+ )
}
- } else {
- stripped.parse().unwrap_or_else(|_| {
- span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
- })
+ _ => return_error!(InvalidMonomorphization::SimdShuffle {
+ span,
+ name,
+ ty: args[2].layout.ty
+ }),
};
-
- require_simd!(ret_ty, "return");
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
require!(
out_len == n,
- InvalidMonomorphizationReturnLength { span, name, in_len: n, ret_ty, out_len }
+ InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
);
require!(
in_elem == out_ty,
- InvalidMonomorphizationReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+ InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
);
let vector = args[2].immediate();
@@ -223,7 +192,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
if name == sym::simd_insert {
require!(
in_elem == arg_tys[2],
- InvalidMonomorphizationInsertedType { span, name, in_elem, in_ty, out_ty: arg_tys[2] }
+ InvalidMonomorphization::InsertedType {
+ span,
+ name,
+ in_elem,
+ in_ty,
+ out_ty: arg_tys[2]
+ }
);
let vector = args[0].immediate();
let index = args[1].immediate();
@@ -240,7 +215,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
if name == sym::simd_extract {
require!(
ret_ty == in_elem,
- InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty }
+ InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
let vector = args[0].immediate();
return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue());
@@ -249,26 +224,29 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
if name == sym::simd_select {
let m_elem_ty = in_elem;
let m_len = in_len;
- require_simd!(arg_tys[1], "argument");
+ require_simd!(
+ arg_tys[1],
+ InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+ );
let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
require!(
m_len == v_len,
- InvalidMonomorphizationMismatchedLengths { span, name, m_len, v_len }
+ InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
);
match m_elem_ty.kind() {
ty::Int(_) => {}
- _ => return_error!(InvalidMonomorphizationMaskType { span, name, ty: m_elem_ty }),
+ _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
}
return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate()));
}
- #[cfg(feature="master")]
+ #[cfg(feature = "master")]
if name == sym::simd_cast || name == sym::simd_as {
- require_simd!(ret_ty, "return");
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
- InvalidMonomorphizationReturnLengthInputType {
+ InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
@@ -288,19 +266,17 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
Unsupported,
}
- let in_style =
- match in_elem.kind() {
- ty::Int(_) | ty::Uint(_) => Style::Int,
- ty::Float(_) => Style::Float,
- _ => Style::Unsupported,
- };
+ let in_style = match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => Style::Int,
+ ty::Float(_) => Style::Float,
+ _ => Style::Unsupported,
+ };
- let out_style =
- match out_elem.kind() {
- ty::Int(_) | ty::Uint(_) => Style::Int,
- ty::Float(_) => Style::Float,
- _ => Style::Unsupported,
- };
+ let out_style = match out_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => Style::Int,
+ ty::Float(_) => Style::Float,
+ _ => Style::Unsupported,
+ };
match (in_style, out_style) {
(Style::Unsupported, Style::Unsupported) => {
@@ -315,7 +291,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
out_elem
}
);
- },
+ }
_ => return Ok(bx.context.convert_vector(None, args[0].immediate(), llret_ty)),
}
}
@@ -329,7 +305,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
})*
_ => {},
}
- return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem })
+ return_error!(InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem })
})*
}
}
@@ -363,10 +339,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let mut shift = 0;
for i in 0..in_len {
- let elem = bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32));
+ let elem =
+ bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32));
let shifted = elem >> sign_shift;
let masked = shifted & one;
- result = result | (bx.context.new_cast(None, masked, result_type) << bx.context.new_rvalue_from_int(result_type, shift));
+ result = result
+ | (bx.context.new_cast(None, masked, result_type)
+ << bx.context.new_rvalue_from_int(result_type, shift));
shift += 1;
}
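The loop in the hunk above assembles a scalar bitmask lane by lane: take each lane's sign bit and OR it into the accumulator at the lane's index. A minimal scalar sketch of the same idea, assuming 32-bit lanes and at most 64 of them (an illustration, not the backend code itself):

fn bitmask_from_lanes(lanes: &[i32]) -> u64 {
    let sign_shift = 31; // mirrors `sign_shift` above for 32-bit elements
    lanes.iter().enumerate().fold(0u64, |acc, (i, &lane)| {
        let bit = ((lane >> sign_shift) & 1) as u64; // extract the sign bit
        acc | (bit << i)                             // place it at the lane index
    })
}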
@@ -415,46 +394,50 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
return Err(());
}};
}
- let (elem_ty_str, elem_ty) =
- if let ty::Float(f) = in_elem.kind() {
- let elem_ty = bx.cx.type_float_from_ty(*f);
- match f.bit_width() {
- 32 => ("f", elem_ty),
- 64 => ("", elem_ty),
- _ => {
- return_error!(InvalidMonomorphizationInvalidFloatVector { span, name, elem_ty: f.name_str(), vec_ty: in_ty });
- }
+ let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
+ let elem_ty = bx.cx.type_float_from_ty(*f);
+ match f.bit_width() {
+ 32 => ("f", elem_ty),
+ 64 => ("", elem_ty),
+ _ => {
+ return_error!(InvalidMonomorphization::FloatingPointVector {
+ span,
+ name,
+ f_ty: *f,
+ in_ty
+ });
}
}
- else {
- return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty });
- };
+ } else {
+ return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
+ };
let vec_ty = bx.cx.type_vector(elem_ty, in_len);
- let intr_name =
- match name {
- sym::simd_ceil => "ceil",
- sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103
- sym::simd_fcos => "cos",
- sym::simd_fexp2 => "exp2",
- sym::simd_fexp => "exp",
- sym::simd_flog10 => "log10",
- sym::simd_flog2 => "log2",
- sym::simd_flog => "log",
- sym::simd_floor => "floor",
- sym::simd_fma => "fma",
- sym::simd_fpowi => "__builtin_powi",
- sym::simd_fpow => "pow",
- sym::simd_fsin => "sin",
- sym::simd_fsqrt => "sqrt",
- sym::simd_round => "round",
- sym::simd_trunc => "trunc",
- _ => return_error!(InvalidMonomorphizationUnrecognized { span, name })
- };
+ let intr_name = match name {
+ sym::simd_ceil => "ceil",
+ sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103
+ sym::simd_fcos => "cos",
+ sym::simd_fexp2 => "exp2",
+ sym::simd_fexp => "exp",
+ sym::simd_flog10 => "log10",
+ sym::simd_flog2 => "log2",
+ sym::simd_flog => "log",
+ sym::simd_floor => "floor",
+ sym::simd_fma => "fma",
+ sym::simd_fpowi => "__builtin_powi",
+ sym::simd_fpow => "pow",
+ sym::simd_fsin => "sin",
+ sym::simd_fsqrt => "sqrt",
+ sym::simd_round => "round",
+ sym::simd_trunc => "trunc",
+ _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
+ };
let builtin_name = format!("{}{}", intr_name, elem_ty_str);
let funcs = bx.cx.functions.borrow();
- let function = funcs.get(&builtin_name).unwrap_or_else(|| panic!("unable to find builtin function {}", builtin_name));
+ let function = funcs
+ .get(&builtin_name)
+ .unwrap_or_else(|| panic!("unable to find builtin function {}", builtin_name));
// TODO(antoyo): add platform-specific behavior here for architectures that have these
// intrinsics as instructions (for instance, gpus)
@@ -500,8 +483,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
}
- #[cfg(feature="master")]
- fn vector_ty<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, elem_ty: Ty<'tcx>, vec_len: u64) -> Type<'gcc> {
+ #[cfg(feature = "master")]
+ fn vector_ty<'gcc, 'tcx>(
+ cx: &CodegenCx<'gcc, 'tcx>,
+ elem_ty: Ty<'tcx>,
+ vec_len: u64,
+ ) -> Type<'gcc> {
// FIXME: use cx.layout_of(ty).llvm_type() ?
let elem_ty = match *elem_ty.kind() {
ty::Int(v) => cx.type_int_from_ty(v),
@@ -512,15 +499,22 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
cx.type_vector(elem_ty, vec_len)
}
- #[cfg(feature="master")]
- fn gather<'a, 'gcc, 'tcx>(default: RValue<'gcc>, pointers: RValue<'gcc>, mask: RValue<'gcc>, pointer_count: usize, bx: &mut Builder<'a, 'gcc, 'tcx>, in_len: u64, underlying_ty: Ty<'tcx>, invert: bool) -> RValue<'gcc> {
- let vector_type =
- if pointer_count > 1 {
- bx.context.new_vector_type(bx.usize_type, in_len)
- }
- else {
- vector_ty(bx, underlying_ty, in_len)
- };
+ #[cfg(feature = "master")]
+ fn gather<'a, 'gcc, 'tcx>(
+ default: RValue<'gcc>,
+ pointers: RValue<'gcc>,
+ mask: RValue<'gcc>,
+ pointer_count: usize,
+ bx: &mut Builder<'a, 'gcc, 'tcx>,
+ in_len: u64,
+ underlying_ty: Ty<'tcx>,
+ invert: bool,
+ ) -> RValue<'gcc> {
+ let vector_type = if pointer_count > 1 {
+ bx.context.new_vector_type(bx.usize_type, in_len)
+ } else {
+ vector_ty(bx, underlying_ty, in_len)
+ };
let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type();
let mut values = vec![];
@@ -551,13 +545,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
if invert {
bx.shuffle_vector(vector, default, mask)
- }
- else {
+ } else {
bx.shuffle_vector(default, vector, mask)
}
}
- #[cfg(feature="master")]
+ #[cfg(feature = "master")]
if name == sym::simd_gather {
// simd_gather(values: <N x T>, pointers: <N x *_ T>,
// mask: <N x i{M}>) -> <N x T>
@@ -566,10 +559,16 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
- require_simd!(in_ty, "first");
- require_simd!(arg_tys[1], "second");
- require_simd!(arg_tys[2], "third");
- require_simd!(ret_ty, "return");
+ require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
+ require_simd!(
+ arg_tys[1],
+ InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+ );
+ require_simd!(
+ arg_tys[2],
+ InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+ );
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
// Of the same length:
let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
@@ -662,10 +661,19 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
}
}
- return Ok(gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, underlying_ty, false));
+ return Ok(gather(
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ pointer_count,
+ bx,
+ in_len,
+ underlying_ty,
+ false,
+ ));
}
- #[cfg(feature="master")]
+ #[cfg(feature = "master")]
if name == sym::simd_scatter {
// simd_scatter(values: <N x T>, pointers: <N x *mut T>,
// mask: <N x i{M}>) -> ()
@@ -674,9 +682,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
- require_simd!(in_ty, "first");
- require_simd!(arg_tys[1], "second");
- require_simd!(arg_tys[2], "third");
+ require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
+ require_simd!(
+ arg_tys[1],
+ InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+ );
+ require_simd!(
+ arg_tys[2],
+ InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+ );
// Of the same length:
let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
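Per the comments above, `simd_gather`/`simd_scatter` operate lane-wise and truncate each mask lane to i1. A rough scalar model of the gather semantics under those stated assumptions (illustrative only; it models the non-inverted case, not the codegen path itself):

fn gather_model<T: Copy>(default: &[T], ptrs: &[*const T], mask: &[i8]) -> Vec<T> {
    default
        .iter()
        .zip(ptrs)
        .zip(mask)
        // a lane is loaded only when the low bit of its mask lane is set
        .map(|((&d, &p), &m)| if (m & 1) != 0 { unsafe { *p } } else { d })
        .collect()
}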
@@ -765,17 +779,24 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
}
}
- let result = gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, underlying_ty, true);
+ let result = gather(
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ pointer_count,
+ bx,
+ in_len,
+ underlying_ty,
+ true,
+ );
let pointers = args[1].immediate();
- let vector_type =
- if pointer_count > 1 {
- bx.context.new_vector_type(bx.usize_type, in_len)
- }
- else {
- vector_ty(bx, underlying_ty, in_len)
- };
+ let vector_type = if pointer_count > 1 {
+ bx.context.new_vector_type(bx.usize_type, in_len)
+ } else {
+ vector_ty(bx, underlying_ty, in_len)
+ };
let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type();
for i in 0..in_len {
@@ -815,7 +836,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
})*
_ => {},
}
- return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem })
+ return_error!(InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem })
})*
}
}
@@ -830,91 +851,97 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let rhs = args[1].immediate();
let is_add = name == sym::simd_saturating_add;
let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
- let (signed, elem_width, elem_ty) =
- match *in_elem.kind() {
- ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)),
- ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_uint_from_ty(i)),
- _ => {
- return_error!(InvalidMonomorphizationExpectedSignedUnsigned {
+ let (signed, elem_width, elem_ty) = match *in_elem.kind() {
+ ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)),
+ ty::Uint(i) => {
+ (false, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_uint_from_ty(i))
+ }
+ _ => {
+ return_error!(InvalidMonomorphization::ExpectedVectorElementType {
span,
name,
- elem_ty: arg_tys[0].simd_size_and_type(bx.tcx()).1,
- vec_ty: arg_tys[0],
+ expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
+ vector_type: arg_tys[0],
});
}
};
- let result =
- match (signed, is_add) {
- (false, true) => {
- let res = lhs + rhs;
- let cmp = bx.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
- res | cmp
- },
- (true, true) => {
- // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition
- // TODO(antoyo): improve using conditional operators if possible.
- // TODO(antoyo): dyncast_vector should not require a call to unqualified.
- let arg_type = lhs.get_type().unqualified();
- // TODO(antoyo): convert lhs and rhs to unsigned.
- let sum = lhs + rhs;
- let vector_type = arg_type.dyncast_vector().expect("vector type");
- let unit = vector_type.get_num_units();
- let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
- let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
-
- let xor1 = lhs ^ rhs;
- let xor2 = lhs ^ sum;
- let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
- let mask = and >> width;
-
- let one = bx.context.new_rvalue_one(elem_ty);
- let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
- let shift1 = ones << width;
- let shift2 = sum >> width;
- let mask_min = shift1 ^ shift2;
-
- let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
- let and2 = mask & mask_min;
-
- and1 + and2
- },
- (false, false) => {
- let res = lhs - rhs;
- let cmp = bx.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
- res & cmp
- },
- (true, false) => {
- // TODO(antoyo): dyncast_vector should not require a call to unqualified.
- let arg_type = lhs.get_type().unqualified();
- // TODO(antoyo): this uses the same algorithm from saturating add, but add the
- // negative of the right operand. Find a proper subtraction algorithm.
- let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs);
-
- // TODO(antoyo): convert lhs and rhs to unsigned.
- let sum = lhs + rhs;
- let vector_type = arg_type.dyncast_vector().expect("vector type");
- let unit = vector_type.get_num_units();
- let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
- let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
-
- let xor1 = lhs ^ rhs;
- let xor2 = lhs ^ sum;
- let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
- let mask = and >> width;
-
- let one = bx.context.new_rvalue_one(elem_ty);
- let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
- let shift1 = ones << width;
- let shift2 = sum >> width;
- let mask_min = shift1 ^ shift2;
-
- let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
- let and2 = mask & mask_min;
-
- and1 + and2
- }
- };
+ let result = match (signed, is_add) {
+ (false, true) => {
+ let res = lhs + rhs;
+ let cmp = bx.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
+ res | cmp
+ }
+ (true, true) => {
+ // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition
+ // TODO(antoyo): improve using conditional operators if possible.
+ // TODO(antoyo): dyncast_vector should not require a call to unqualified.
+ let arg_type = lhs.get_type().unqualified();
+ // TODO(antoyo): convert lhs and rhs to unsigned.
+ let sum = lhs + rhs;
+ let vector_type = arg_type.dyncast_vector().expect("vector type");
+ let unit = vector_type.get_num_units();
+ let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
+ let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
+
+ let xor1 = lhs ^ rhs;
+ let xor2 = lhs ^ sum;
+ let and =
+ bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
+ let mask = and >> width;
+
+ let one = bx.context.new_rvalue_one(elem_ty);
+ let ones =
+ bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
+ let shift1 = ones << width;
+ let shift2 = sum >> width;
+ let mask_min = shift1 ^ shift2;
+
+ let and1 =
+ bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
+ let and2 = mask & mask_min;
+
+ and1 + and2
+ }
+ (false, false) => {
+ let res = lhs - rhs;
+ let cmp = bx.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
+ res & cmp
+ }
+ (true, false) => {
+ // TODO(antoyo): dyncast_vector should not require a call to unqualified.
+ let arg_type = lhs.get_type().unqualified();
+ // TODO(antoyo): this uses the same algorithm as saturating add, but adds the
+ // negative of the right operand. Find a proper subtraction algorithm.
+ let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs);
+
+ // TODO(antoyo): convert lhs and rhs to unsigned.
+ let sum = lhs + rhs;
+ let vector_type = arg_type.dyncast_vector().expect("vector type");
+ let unit = vector_type.get_num_units();
+ let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
+ let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
+
+ let xor1 = lhs ^ rhs;
+ let xor2 = lhs ^ sum;
+ let and =
+ bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
+ let mask = and >> width;
+
+ let one = bx.context.new_rvalue_one(elem_ty);
+ let ones =
+ bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
+ let shift1 = ones << width;
+ let shift2 = sum >> width;
+ let mask_min = shift1 ^ shift2;
+
+ let and1 =
+ bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
+ let and2 = mask & mask_min;
+
+ and1 + and2
+ }
+ };
return Ok(result);
}
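The signed saturating path above is the branchless mask trick from the linked code-review thread: detect overflow from the sign bits, derive the saturation value from the sign of the wrapped sum, then blend. A scalar sketch of the same computation for a single i32 lane (an illustration under that assumption, not the vectorized backend code):

fn saturating_add_i32(lhs: i32, rhs: i32) -> i32 {
    let width = 31; // bit width - 1, like `((elem_width as i32) << 3) - 1`
    let sum = lhs.wrapping_add(rhs);
    // overflow iff lhs and rhs share a sign that differs from sum's sign
    let mask = (!(lhs ^ rhs) & (lhs ^ sum)) >> width; // all ones or all zeros
    // saturation value: i32::MAX for positive overflow, i32::MIN for negative
    let mask_min = (1i32 << width) ^ (sum >> width);
    (!mask & sum) + (mask & mask_min)
}

// e.g. saturating_add_i32(i32::MAX, 1) == i32::MAX
//      saturating_add_i32(i32::MIN, -1) == i32::MIN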
@@ -925,7 +952,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
if name == sym::$name {
require!(
ret_ty == in_elem,
- InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty }
+ InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
@@ -947,11 +974,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
}
}
- _ => return_error!(InvalidMonomorphizationUnsupportedElement {
+ _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
+ symbol: sym::$name,
in_ty,
- elem_ty: in_elem,
+ in_elem,
ret_ty
}),
};
@@ -988,18 +1016,24 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
1.0
);
-
macro_rules! minmax_red {
($name:ident: $int_red:ident, $float_red:ident) => {
if name == sym::$name {
require!(
ret_ty == in_elem,
- InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty }
+ InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())),
ty::Float(_) => Ok(bx.$float_red(args[0].immediate())),
- _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }),
+ _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+ span,
+ name,
+ symbol: sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ }),
};
}
};
@@ -1017,17 +1051,18 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let input = if !$boolean {
require!(
ret_ty == in_elem,
- InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty }
+ InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
args[0].immediate()
} else {
match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {}
- _ => return_error!(InvalidMonomorphizationUnsupportedElement {
+ _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
+ symbol: sym::$name,
in_ty,
- elem_ty: in_elem,
+ in_elem,
ret_ty
}),
}
@@ -1037,13 +1072,22 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_op(input, $op);
- Ok(if !$boolean { r } else { bx.icmp(IntPredicate::IntNE, r, bx.context.new_rvalue_zero(r.get_type())) })
+ Ok(if !$boolean {
+ r
+ } else {
+ bx.icmp(
+ IntPredicate::IntNE,
+ r,
+ bx.context.new_rvalue_zero(r.get_type()),
+ )
+ })
}
- _ => return_error!(InvalidMonomorphizationUnsupportedElement {
+ _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
+ symbol: sym::$name,
in_ty,
- elem_ty: in_elem,
+ in_elem,
ret_ty
}),
};
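In the boolean-reduction hunks above, the element-wise reduction result is lowered to a bool by comparing it against zero (the `IntPredicate::IntNE` icmp). A scalar analogue of an any-style reduction, assuming integer mask lanes and a bitwise-or reduce op (a sketch only):

fn reduce_any(mask: &[i32]) -> bool {
    let r = mask.iter().fold(0, |acc, &lane| acc | lane);
    r != 0 // mirrors the `IntNE` compare against zero above
}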
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 2a6b64278..697ae015f 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -71,7 +71,7 @@ use gccjit::{Context, OptimizationLevel, CType};
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
use rustc_codegen_ssa::base::codegen_crate;
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn};
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
use rustc_codegen_ssa::target_features::supported_target_features;
use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
@@ -217,14 +217,14 @@ impl WriteBackendMethods for GccCodegenBackend {
type ThinData = ();
type ThinBuffer = ThinBuffer;
- fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
+ fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLtoInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
// TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
// NOTE: implemented elsewhere.
// TODO(antoyo): what is implemented elsewhere ^ ?
let module =
match modules.remove(0) {
- FatLTOInput::InMemory(module) => module,
- FatLTOInput::Serialized { .. } => {
+ FatLtoInput::InMemory(module) => module,
+ FatLtoInput::Serialized { .. } => {
unimplemented!();
}
};
@@ -239,6 +239,10 @@ impl WriteBackendMethods for GccCodegenBackend {
unimplemented!();
}
+ fn print_statistics(&self) {
+ unimplemented!()
+ }
+
unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
Ok(())
diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs
index 342b830ce..3322d5651 100644
--- a/compiler/rustc_codegen_gcc/src/mono_item.rs
+++ b/compiler/rustc_codegen_gcc/src/mono_item.rs
@@ -31,7 +31,7 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
#[cfg_attr(not(feature="master"), allow(unused_variables))]
fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, visibility: Visibility, symbol_name: &str) {
- assert!(!instance.substs.has_infer());
+ assert!(!instance.args.has_infer());
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
self.linkage.set(base::linkage_to_gcc(linkage));
diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs
index 521b64ad3..318997405 100644
--- a/compiler/rustc_codegen_gcc/src/type_.rs
+++ b/compiler/rustc_codegen_gcc/src/type_.rs
@@ -54,6 +54,23 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
self.u128_type
}
+ pub fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
+ ty.make_pointer()
+ }
+
+ pub fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
+ // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
+ ty.make_pointer()
+ }
+
+ pub fn type_i8p(&self) -> Type<'gcc> {
+ self.type_ptr_to(self.type_i8())
+ }
+
+ pub fn type_i8p_ext(&self, address_space: AddressSpace) -> Type<'gcc> {
+ self.type_ptr_to_ext(self.type_i8(), address_space)
+ }
+
pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
// FIXME(eddyb) We could find a better approximation if ity.align < align.
let ity = Integer::approximate_align(self, align);
@@ -149,13 +166,12 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
}
- fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
- ty.make_pointer()
+ fn type_ptr(&self) -> Type<'gcc> {
+ self.type_ptr_to(self.type_void())
}
- fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
- // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
- ty.make_pointer()
+ fn type_ptr_ext(&self, address_space: AddressSpace) -> Type<'gcc> {
+ self.type_ptr_to_ext(self.type_void(), address_space)
}
fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index e0823888f..84d578385 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -101,7 +101,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
(layout.ty.kind(), &layout.variants)
{
- write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+ write!(&mut name, "::{}", ty::GeneratorArgs::variant_name(index)).unwrap();
}
Some(name)
}
@@ -282,7 +282,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
}
// only wide pointer boxes are handled as pointers
// thin pointer boxes with scalar allocators are handled by the general logic below
- ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+ ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
let ptr_ty = Ty::new_mut_ptr(cx.tcx,self.ty.boxed_ty());
return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
}
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
index ad51f2d09..be09820d0 100644
--- a/compiler/rustc_codegen_llvm/Cargo.toml
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -11,7 +11,7 @@ bitflags = "1.0"
cstr = "0.2"
libc = "0.2"
measureme = "10.0.0"
-object = { version = "0.31.1", default-features = false, features = [
+object = { version = "0.32.0", default-features = false, features = [
"std",
"read",
] }
diff --git a/compiler/rustc_codegen_llvm/messages.ftl b/compiler/rustc_codegen_llvm/messages.ftl
index de1622951..aed4a8f3c 100644
--- a/compiler/rustc_codegen_llvm/messages.ftl
+++ b/compiler/rustc_codegen_llvm/messages.ftl
@@ -1,7 +1,8 @@
codegen_llvm_copy_bitcode = failed to copy bitcode to object file: {$err}
codegen_llvm_dlltool_fail_import_library =
- Dlltool could not create import library: {$stdout}
+ Dlltool could not create import library with {$dlltool_path} {$dlltool_args}:
+ {$stdout}
{$stderr}
codegen_llvm_dynamic_linking_with_lto =
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index d221bad28..c6a7dc95d 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -216,9 +216,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
- let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
- let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
- bx.store(val, cast_dst, self.layout.align.abi);
+ bx.store(val, dst.llval, self.layout.align.abi);
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
@@ -336,7 +334,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
PassMode::Cast(cast, _) => cast.llvm_type(cx),
PassMode::Indirect { .. } => {
- llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+ llargument_tys.push(cx.type_ptr());
cx.type_void()
}
};
@@ -364,9 +362,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
cast.llvm_type(cx)
}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
- cx.type_ptr_to(arg.memory_ty(cx))
- }
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(),
};
llargument_tys.push(llarg_ty);
}
@@ -379,12 +375,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
- unsafe {
- llvm::LLVMPointerType(
- self.llvm_type(cx),
- cx.data_layout().instruction_address_space.0 as c_uint,
- )
- }
+ cx.type_ptr_ext(cx.data_layout().instruction_address_space)
}
fn llvm_cconv(&self) -> llvm::CallConv {
@@ -392,13 +383,16 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
- let mut func_attrs = SmallVec::<[_; 2]>::new();
+ let mut func_attrs = SmallVec::<[_; 3]>::new();
if self.ret.layout.abi.is_uninhabited() {
func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
}
if !self.can_unwind {
func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
}
+ if let Conv::RiscvInterrupt { kind } = self.conv {
+ func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
+ }
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
let mut i = 0;
@@ -574,7 +568,9 @@ impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
impl From<Conv> for llvm::CallConv {
fn from(conv: Conv) -> Self {
match conv {
- Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
+ Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
+ llvm::CCallConv
+ }
Conv::RustCold => llvm::ColdCallConv,
Conv::AmdGpuKernel => llvm::AmdGpuKernel,
Conv::AvrInterrupt => llvm::AvrInterrupt,
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index a57508815..db5c1388e 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -9,7 +9,7 @@ use rustc_middle::ty::TyCtxt;
use rustc_session::config::{DebugInfo, OomStrategy};
use crate::debuginfo;
-use crate::llvm::{self, False, True};
+use crate::llvm::{self, Context, False, Module, True, Type};
use crate::ModuleLlvm;
pub(crate) unsafe fn codegen(
@@ -28,14 +28,13 @@ pub(crate) unsafe fn codegen(
tws => bug!("Unsupported target word size for int: {}", tws),
};
let i8 = llvm::LLVMInt8TypeInContext(llcx);
- let i8p = llvm::LLVMPointerType(i8, 0);
- let void = llvm::LLVMVoidTypeInContext(llcx);
+ let i8p = llvm::LLVMPointerTypeInContext(llcx, 0);
if kind == AllocatorKind::Default {
for method in ALLOCATOR_METHODS {
let mut args = Vec::with_capacity(method.inputs.len());
- for ty in method.inputs.iter() {
- match *ty {
+ for input in method.inputs.iter() {
+ match input.ty {
AllocatorTy::Layout => {
args.push(usize); // size
args.push(usize); // align
@@ -54,102 +53,25 @@ pub(crate) unsafe fn codegen(
panic!("invalid allocator output")
}
};
- let ty = llvm::LLVMFunctionType(
- output.unwrap_or(void),
- args.as_ptr(),
- args.len() as c_uint,
- False,
- );
- let name = global_fn_name(method.name);
- let llfn =
- llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
-
- if tcx.sess.target.default_hidden_visibility {
- llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
- }
- if tcx.sess.must_emit_unwind_tables() {
- let uwtable = attributes::uwtable_attr(llcx);
- attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
- }
- let callee = default_fn_name(method.name);
- let callee =
- llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
- llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
-
- let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
-
- let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
- llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
- let args = args
- .iter()
- .enumerate()
- .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
- .collect::<Vec<_>>();
- let ret = llvm::LLVMRustBuildCall(
- llbuilder,
- ty,
- callee,
- args.as_ptr(),
- args.len() as c_uint,
- [].as_ptr(),
- 0 as c_uint,
- );
- llvm::LLVMSetTailCall(ret, True);
- if output.is_some() {
- llvm::LLVMBuildRet(llbuilder, ret);
- } else {
- llvm::LLVMBuildRetVoid(llbuilder);
- }
- llvm::LLVMDisposeBuilder(llbuilder);
+ let from_name = global_fn_name(method.name);
+ let to_name = default_fn_name(method.name);
+
+ create_wrapper_function(tcx, llcx, llmod, &from_name, &to_name, &args, output, false);
}
}
// rust alloc error handler
- let args = [usize, usize]; // size, align
-
- let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
- let name = "__rust_alloc_error_handler";
- let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
- // -> ! DIFlagNoReturn
- let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
- attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
-
- if tcx.sess.target.default_hidden_visibility {
- llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
- }
- if tcx.sess.must_emit_unwind_tables() {
- let uwtable = attributes::uwtable_attr(llcx);
- attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
- }
-
- let callee = alloc_error_handler_name(alloc_error_handler_kind);
- let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
- // -> ! DIFlagNoReturn
- attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
- llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
-
- let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
-
- let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
- llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
- let args = args
- .iter()
- .enumerate()
- .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
- .collect::<Vec<_>>();
- let ret = llvm::LLVMRustBuildCall(
- llbuilder,
- ty,
- callee,
- args.as_ptr(),
- args.len() as c_uint,
- [].as_ptr(),
- 0 as c_uint,
+ create_wrapper_function(
+ tcx,
+ llcx,
+ llmod,
+ "__rust_alloc_error_handler",
+ &alloc_error_handler_name(alloc_error_handler_kind),
+ &[usize, usize], // size, align
+ None,
+ true,
);
- llvm::LLVMSetTailCall(ret, True);
- llvm::LLVMBuildRetVoid(llbuilder);
- llvm::LLVMDisposeBuilder(llbuilder);
// __rust_alloc_error_handler_should_panic
let name = OomStrategy::SYMBOL;
@@ -175,3 +97,79 @@ pub(crate) unsafe fn codegen(
dbg_cx.finalize(tcx.sess);
}
}
+
+fn create_wrapper_function(
+ tcx: TyCtxt<'_>,
+ llcx: &Context,
+ llmod: &Module,
+ from_name: &str,
+ to_name: &str,
+ args: &[&Type],
+ output: Option<&Type>,
+ no_return: bool,
+) {
+ unsafe {
+ let ty = llvm::LLVMFunctionType(
+ output.unwrap_or_else(|| llvm::LLVMVoidTypeInContext(llcx)),
+ args.as_ptr(),
+ args.len() as c_uint,
+ False,
+ );
+ let llfn = llvm::LLVMRustGetOrInsertFunction(
+ llmod,
+ from_name.as_ptr().cast(),
+ from_name.len(),
+ ty,
+ );
+ let no_return = if no_return {
+ // -> ! DIFlagNoReturn
+ let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
+ Some(no_return)
+ } else {
+ None
+ };
+
+ if tcx.sess.target.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ let uwtable = attributes::uwtable_attr(llcx);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
+ }
+
+ let callee =
+ llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_ptr().cast(), to_name.len(), ty);
+ if let Some(no_return) = no_return {
+ // -> ! DIFlagNoReturn
+ attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
+ }
+ llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+ let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+ let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+ llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+ .collect::<Vec<_>>();
+ let ret = llvm::LLVMRustBuildCall(
+ llbuilder,
+ ty,
+ callee,
+ args.as_ptr(),
+ args.len() as c_uint,
+ [].as_ptr(),
+ 0 as c_uint,
+ );
+ llvm::LLVMSetTailCall(ret, True);
+ if output.is_some() {
+ llvm::LLVMBuildRet(llbuilder, ret);
+ } else {
+ llvm::LLVMBuildRetVoid(llbuilder);
+ }
+ llvm::LLVMDisposeBuilder(llbuilder);
+ }
+}
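At the Rust level, the wrapper emitted by `create_wrapper_function` for the default-allocator case is roughly equivalent to a forwarding shim like the following; the symbol names (e.g. `__rdl_alloc`, as produced by `default_fn_name`) are shown only as an illustration:

extern "Rust" {
    fn __rdl_alloc(size: usize, align: usize) -> *mut u8;
}

#[no_mangle]
unsafe extern "Rust" fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
    // a plain forwarding call, matching the LLVMSetTailCall in the helper above
    __rdl_alloc(size, align)
}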
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 2a6ad1be7..1323261ae 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -44,9 +44,10 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let is_target_supported = |reg_class: InlineAsmRegClass| {
for &(_, feature) in reg_class.supported_types(asm_arch) {
if let Some(feature) = feature {
- let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
- if self.tcx.sess.target_features.contains(&feature)
- || codegen_fn_attrs.target_features.contains(&feature)
+ if self
+ .tcx
+ .asm_target_features(instance.def_id())
+ .contains(&feature)
{
return true;
}
@@ -261,6 +262,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
InlineAsmArch::M68k => {
constraints.push("~{ccr}".to_string());
}
+ InlineAsmArch::CSKY => {}
}
}
if !options.contains(InlineAsmOptions::NOMEM) {
@@ -693,6 +695,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
+ InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
}
@@ -792,6 +796,7 @@ fn modifier_to_llvm(
bug!("LLVM backend does not support SPIR-V")
}
InlineAsmRegClass::M68k(_) => None,
+ InlineAsmRegClass::CSKY(_) => None,
InlineAsmRegClass::Err => unreachable!(),
}
}
@@ -868,6 +873,8 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
+ InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
}
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 39275272e..b6c01545f 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -128,7 +128,10 @@ fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attr
// The function name varies on platforms.
// See test/CodeGen/mcount.c in clang.
- let mcount_name = cx.sess().target.mcount.as_ref();
+ let mcount_name = match &cx.sess().target.llvm_mcount_intrinsic {
+ Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
+ None => cx.sess().target.mcount.as_ref(),
+ };
attrs.push(llvm::CreateAttrStringValue(
cx.llcx,
@@ -335,6 +338,10 @@ pub fn from_fn_attrs<'ll, 'tcx>(
to_add.extend(probestack_attr(cx));
to_add.extend(stackprotector_attr(cx));
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_BUILTINS) {
+ to_add.push(llvm::CreateAttrString(cx.llcx, "no-builtins"));
+ }
+
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
}
@@ -359,50 +366,44 @@ pub fn from_fn_attrs<'ll, 'tcx>(
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
|| codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
{
- if llvm_util::get_version() >= (15, 0, 0) {
- to_add.push(create_alloc_family_attr(cx.llcx));
- // apply to argument place instead of function
- let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
- attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
- to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
- let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
- if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
- flags |= AllocKindFlags::Uninitialized;
- } else {
- flags |= AllocKindFlags::Zeroed;
- }
- to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
+ to_add.push(create_alloc_family_attr(cx.llcx));
+ // apply to argument place instead of function
+ let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
+ to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
+ let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
+ flags |= AllocKindFlags::Uninitialized;
+ } else {
+ flags |= AllocKindFlags::Zeroed;
}
+ to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
// apply to return place instead of function (unlike all other attributes applied in this function)
let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
- if llvm_util::get_version() >= (15, 0, 0) {
- to_add.push(create_alloc_family_attr(cx.llcx));
- to_add.push(llvm::CreateAllocKindAttr(
- cx.llcx,
- AllocKindFlags::Realloc | AllocKindFlags::Aligned,
- ));
- // applies to argument place instead of function place
- let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
- attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
- // apply to argument place instead of function
- let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
- attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
- to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
- }
+ to_add.push(create_alloc_family_attr(cx.llcx));
+ to_add.push(llvm::CreateAllocKindAttr(
+ cx.llcx,
+ AllocKindFlags::Realloc | AllocKindFlags::Aligned,
+ ));
+ // applies to argument place instead of function place
+ let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
+ // apply to argument place instead of function
+ let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
+ to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
- if llvm_util::get_version() >= (15, 0, 0) {
- to_add.push(create_alloc_family_attr(cx.llcx));
- to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
- // applies to argument place instead of function place
- let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
- attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
- }
+ to_add.push(create_alloc_family_attr(cx.llcx));
+ to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
+ // applies to argument place instead of function place
+ let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
to_add.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"));
@@ -446,7 +447,7 @@ pub fn from_fn_attrs<'ll, 'tcx>(
let mut function_features = function_features
.iter()
.flat_map(|feat| {
- llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{}", f))
+ llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{f}"))
})
.chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index a6416e954..a82d2c577 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -56,7 +56,7 @@ fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
"x86" => LLVMMachineType::I386,
"aarch64" => LLVMMachineType::ARM64,
"arm" => LLVMMachineType::ARM,
- _ => panic!("unsupported cpu type {}", cpu),
+ _ => panic!("unsupported cpu type {cpu}"),
}
}
@@ -128,7 +128,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" };
let output_path = {
let mut output_path: PathBuf = tmpdir.to_path_buf();
- output_path.push(format!("{}{}", lib_name, name_suffix));
+ output_path.push(format!("{lib_name}{name_suffix}"));
output_path.with_extension("lib")
};
@@ -156,7 +156,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
// functions. Therefore, use binutils to create the import library instead,
// by writing a .DEF file to the temp dir and calling binutils's dlltool.
let def_file_path =
- tmpdir.join(format!("{}{}", lib_name, name_suffix)).with_extension("def");
+ tmpdir.join(format!("{lib_name}{name_suffix}")).with_extension("def");
let def_file_content = format!(
"EXPORTS\n{}",
@@ -164,7 +164,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
.into_iter()
.map(|(name, ordinal)| {
match ordinal {
- Some(n) => format!("{} @{} NONAME", name, n),
+ Some(n) => format!("{name} @{n} NONAME"),
None => name,
}
})
@@ -198,25 +198,24 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
"arm" => ("arm", "--32"),
_ => panic!("unsupported arch {}", sess.target.arch),
};
- let result = std::process::Command::new(&dlltool)
- .args([
- "-d",
- def_file_path.to_str().unwrap(),
- "-D",
- lib_name,
- "-l",
- output_path.to_str().unwrap(),
- "-m",
- dlltool_target_arch,
- "-f",
- dlltool_target_bitness,
- "--no-leading-underscore",
- "--temp-prefix",
- temp_prefix.to_str().unwrap(),
- ])
- .output();
-
- match result {
+ let mut dlltool_cmd = std::process::Command::new(&dlltool);
+ dlltool_cmd.args([
+ "-d",
+ def_file_path.to_str().unwrap(),
+ "-D",
+ lib_name,
+ "-l",
+ output_path.to_str().unwrap(),
+ "-m",
+ dlltool_target_arch,
+ "-f",
+ dlltool_target_bitness,
+ "--no-leading-underscore",
+ "--temp-prefix",
+ temp_prefix.to_str().unwrap(),
+ ]);
+
+ match dlltool_cmd.output() {
Err(e) => {
sess.emit_fatal(ErrorCallingDllTool {
dlltool_path: dlltool.to_string_lossy(),
@@ -226,6 +225,12 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
// dlltool returns '0' on failure, so check for error output instead.
Ok(output) if !output.stderr.is_empty() => {
sess.emit_fatal(DlltoolFailImportLibrary {
+ dlltool_path: dlltool.to_string_lossy(),
+ dlltool_args: dlltool_cmd
+ .get_args()
+ .map(|arg| arg.to_string_lossy())
+ .collect::<Vec<_>>()
+ .join(" "),
stdout: String::from_utf8_lossy(&output.stdout),
stderr: String::from_utf8_lossy(&output.stderr),
})
@@ -430,7 +435,7 @@ impl<'a> LlvmArchiveBuilder<'a> {
}
fn string_to_io_error(s: String) -> io::Error {
- io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
+ io::Error::new(io::ErrorKind::Other, format!("bad archive: {s}"))
}
fn find_binutils_dlltool(sess: &Session) -> OsString {
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index d7dd98d79..b2d28cef8 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -7,7 +7,7 @@ use crate::{LlvmCodegenBackend, ModuleLlvm};
use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, TargetMachineFactoryConfig};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
use rustc_data_structures::fx::FxHashMap;
@@ -166,7 +166,7 @@ fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFro
/// for further optimization.
pub(crate) fn run_fat(
cgcx: &CodegenContext<LlvmCodegenBackend>,
- modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
let diag_handler = cgcx.create_diag_handler();
@@ -220,7 +220,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBu
fn fat_lto(
cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
- modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
symbols_below_threshold: &[*const libc::c_char],
@@ -245,8 +245,8 @@ fn fat_lto(
}));
for module in modules {
match module {
- FatLTOInput::InMemory(m) => in_memory.push(m),
- FatLTOInput::Serialized { name, buffer } => {
+ FatLtoInput::InMemory(m) => in_memory.push(m),
+ FatLtoInput::Serialized { name, buffer } => {
info!("pushing serialized module {:?}", name);
let buffer = SerializedModule::Local(buffer);
serialized_modules.push((buffer, CString::new(name).unwrap()));
@@ -332,7 +332,7 @@ fn fat_lto(
let _timer = cgcx
.prof
.generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
- recorder.record_arg(format!("{:?}", name))
+ recorder.record_arg(format!("{name:?}"))
});
info!("linking {:?}", name);
let data = bc_decoded.data();
@@ -787,7 +787,7 @@ impl ThinLTOKeysMap {
let file = File::create(path)?;
let mut writer = io::BufWriter::new(file);
for (module, key) in &self.keys {
- writeln!(writer, "{} {}", module, key)?;
+ writeln!(writer, "{module} {key}")?;
}
Ok(())
}
@@ -801,7 +801,7 @@ impl ThinLTOKeysMap {
let mut split = line.split(' ');
let module = split.next().unwrap();
let key = split.next().unwrap();
- assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
+ assert_eq!(split.next(), None, "Expected two space-separated values, found {line:?}");
keys.insert(module.to_string(), key.to_string());
}
Ok(Self { keys })
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 0f5e97544..47cc5bd52 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -4,7 +4,6 @@ use crate::back::profiling::{
};
use crate::base;
use crate::common;
-use crate::consts;
use crate::errors::{
CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
};
@@ -259,7 +258,7 @@ pub(crate) fn save_temp_bitcode(
return;
}
unsafe {
- let ext = format!("{}.bc", name);
+ let ext = format!("{name}.bc");
let cgu = Some(&module.name[..]);
let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
let cstr = path_to_c_string(&path);
@@ -321,6 +320,7 @@ impl<'a> DiagnosticHandlers<'a> {
})
.and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
+ let pgo_available = cgcx.opts.cg.profile_use.is_some();
let data = Box::into_raw(Box::new((cgcx, handler)));
unsafe {
let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
@@ -334,6 +334,7 @@ impl<'a> DiagnosticHandlers<'a> {
// The `as_ref()` is important here, otherwise the `CString` will be dropped
// too soon!
remark_file.as_ref().map(|dir| dir.as_ptr()).unwrap_or(std::ptr::null()),
+ pgo_available,
);
DiagnosticHandlers { data, llcx, old_handler }
}
@@ -382,29 +383,22 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
}
llvm::diagnostic::Optimization(opt) => {
- let enabled = match cgcx.remark {
- Passes::All => true,
- Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
- };
-
- if enabled {
- diag_handler.emit_note(FromLlvmOptimizationDiag {
- filename: &opt.filename,
- line: opt.line,
- column: opt.column,
- pass_name: &opt.pass_name,
- kind: match opt.kind {
- OptimizationDiagnosticKind::OptimizationRemark => "success",
- OptimizationDiagnosticKind::OptimizationMissed
- | OptimizationDiagnosticKind::OptimizationFailure => "missed",
- OptimizationDiagnosticKind::OptimizationAnalysis
- | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute
- | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis",
- OptimizationDiagnosticKind::OptimizationRemarkOther => "other",
- },
- message: &opt.message,
- });
- }
+ diag_handler.emit_note(FromLlvmOptimizationDiag {
+ filename: &opt.filename,
+ line: opt.line,
+ column: opt.column,
+ pass_name: &opt.pass_name,
+ kind: match opt.kind {
+ OptimizationDiagnosticKind::OptimizationRemark => "success",
+ OptimizationDiagnosticKind::OptimizationMissed
+ | OptimizationDiagnosticKind::OptimizationFailure => "missed",
+ OptimizationDiagnosticKind::OptimizationAnalysis
+ | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute
+ | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis",
+ OptimizationDiagnosticKind::OptimizationRemarkOther => "other",
+ },
+ message: &opt.message,
+ });
}
llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
let message = llvm::build_string(|s| {
@@ -478,6 +472,8 @@ pub(crate) unsafe fn llvm_optimize(
Some(llvm::SanitizerOptions {
sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
+ sanitize_cfi: config.sanitizer.contains(SanitizerSet::CFI),
+ sanitize_kcfi: config.sanitizer.contains(SanitizerSet::KCFI),
sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
@@ -513,6 +509,7 @@ pub(crate) unsafe fn llvm_optimize(
&*module.module_llvm.tm,
to_pass_builder_opt_level(opt_level),
opt_stage,
+ cgcx.opts.cg.linker_plugin_lto.enabled(),
config.no_prepopulate_passes,
config.verify_llvm_ir,
using_thin_buffers,
@@ -713,7 +710,7 @@ pub(crate) unsafe fn codegen(
let Ok(demangled) = rustc_demangle::try_demangle(input) else { return 0 };
- if write!(cursor, "{:#}", demangled).is_err() {
+ if write!(cursor, "{demangled:#}").is_err() {
// Possible only if provided buffer is not big enough
return 0;
}
@@ -834,7 +831,7 @@ pub(crate) unsafe fn codegen(
}
fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
- let mut asm = format!(".section {},\"{}\"\n", section_name, section_flags).into_bytes();
+ let mut asm = format!(".section {section_name},\"{section_flags}\"\n").into_bytes();
asm.extend_from_slice(b".ascii \"");
asm.reserve(data.len());
for &byte in data {
@@ -992,7 +989,7 @@ fn create_msvc_imps(
let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
unsafe {
- let i8p_ty = Type::i8p_llcx(llcx);
+ let ptr_ty = Type::ptr_llcx(llcx);
let globals = base::iter_globals(llmod)
.filter(|&val| {
llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
@@ -1012,8 +1009,8 @@ fn create_msvc_imps(
.collect::<Vec<_>>();
for (imp_name, val) in globals {
- let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
- llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
+ let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast());
+ llvm::LLVMSetInitializer(imp, val);
llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
}
}
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 5b2bbdb4b..b659fd02e 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -86,8 +86,8 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
{
let cx = CodegenCx::new(tcx, cgu, &llvm_module);
let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
- for &(mono_item, (linkage, visibility)) in &mono_items {
- mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+ for &(mono_item, data) in &mono_items {
+ mono_item.predefine::<Builder<'_, '_, '_>>(&cx, data.linkage, data.visibility);
}
// ... and now that we have everything pre-defined, fill out those definitions.
@@ -123,8 +123,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
// happen after the llvm.used variables are created.
for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
unsafe {
- let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
- llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+ llvm::LLVMReplaceAllUsesWith(old_g, new_g);
llvm::LLVMDeleteGlobal(old_g);
}
}
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index d55992bf0..ac6d8f841 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -652,7 +652,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
flags: MemFlags,
) -> &'ll Value {
debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
- let ptr = self.check_store(val, ptr);
+ assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
unsafe {
let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
let align =
@@ -682,7 +682,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
size: Size,
) {
debug!("Store {:?} -> {:?}", val, ptr);
- let ptr = self.check_store(val, ptr);
+ assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
unsafe {
let store = llvm::LLVMRustBuildAtomicStore(
self.llbuilder,
@@ -873,8 +873,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
let size = self.intcast(size, self.type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
- let dst = self.pointercast(dst, self.type_i8p());
- let src = self.pointercast(src, self.type_i8p());
unsafe {
llvm::LLVMRustBuildMemCpy(
self.llbuilder,
@@ -900,8 +898,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
let size = self.intcast(size, self.type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
- let dst = self.pointercast(dst, self.type_i8p());
- let src = self.pointercast(src, self.type_i8p());
unsafe {
llvm::LLVMRustBuildMemMove(
self.llbuilder,
@@ -924,7 +920,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
flags: MemFlags,
) {
let is_volatile = flags.contains(MemFlags::VOLATILE);
- let ptr = self.pointercast(ptr, self.type_i8p());
unsafe {
llvm::LLVMRustBuildMemSet(
self.llbuilder,
@@ -981,7 +976,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
- let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+ let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
let landing_pad = self.landing_pad(ty, pers_fn, 0);
unsafe {
llvm::LLVMSetCleanup(landing_pad, llvm::True);
@@ -990,14 +985,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
- let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+ let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
let landing_pad = self.landing_pad(ty, pers_fn, 1);
- self.add_clause(landing_pad, self.const_array(self.type_i8p(), &[]));
+ self.add_clause(landing_pad, self.const_array(self.type_ptr(), &[]));
(self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
}
fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
- let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+ let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
let mut exn = self.const_poison(ty);
exn = self.insert_value(exn, exn0, 0);
exn = self.insert_value(exn, exn1, 1);
@@ -1161,7 +1156,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
let llty = self.cx.type_func(
- &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
+ &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
self.cx.type_void(),
);
let args = &[fn_name, hash, num_counters, index];
@@ -1387,25 +1382,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
ret.expect("LLVM does not have support for catchret")
}
- fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
- let dest_ptr_ty = self.cx.val_ty(ptr);
- let stored_ty = self.cx.val_ty(val);
- let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
-
- assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
-
- if dest_ptr_ty == stored_ptr_ty {
- ptr
- } else {
- debug!(
- "type mismatch in store. \
- Expected {:?}, got {:?}; inserting bitcast",
- dest_ptr_ty, stored_ptr_ty
- );
- self.bitcast(ptr, stored_ptr_ty)
- }
- }
-
fn check_call<'b>(
&mut self,
typ: &str,
@@ -1415,9 +1391,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
) -> Cow<'b, [&'ll Value]> {
assert!(
self.cx.type_kind(fn_ty) == TypeKind::Function,
- "builder::{} not passed a function, but {:?}",
- typ,
- fn_ty
+ "builder::{typ} not passed a function, but {fn_ty:?}"
);
let param_tys = self.cx.func_params_types(fn_ty);
@@ -1468,7 +1442,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
return;
}
- let ptr = self.pointercast(ptr, self.cx.type_i8p());
self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
}
@@ -1509,12 +1482,9 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
let instr = if signed { "fptosi" } else { "fptoui" };
let name = if let Some(vector_length) = vector_length {
- format!(
- "llvm.{}.sat.v{}i{}.v{}f{}",
- instr, vector_length, int_width, vector_length, float_width
- )
+ format!("llvm.{instr}.sat.v{vector_length}i{int_width}.v{vector_length}f{float_width}")
} else {
- format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
+ format!("llvm.{instr}.sat.i{int_width}.f{float_width}")
};
let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None)
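For orientation, a worked expansion of the intrinsic-name formatting in the hunk above; the chosen widths and vector length are assumptions picked only to illustrate the output.

fn main() {
    let (instr, int_width, float_width) = ("fptosi", 32, 64);
    // Scalar form of the saturating float-to-int intrinsic name.
    assert_eq!(
        format!("llvm.{instr}.sat.i{int_width}.f{float_width}"),
        "llvm.fptosi.sat.i32.f64"
    );
    // Vector form, assuming a 4-lane vector.
    let vector_length = 4;
    assert_eq!(
        format!("llvm.{instr}.sat.v{vector_length}i{int_width}.v{vector_length}f{float_width}"),
        "llvm.fptosi.sat.v4i32.v4f64"
    );
}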
@@ -1542,9 +1512,9 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: &'ll Value,
) {
- let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
- if is_indirect_call && fn_abi.is_some() && self.tcx.sess.is_sanitizer_cfi_enabled() {
- if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::CFI) {
+ let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
+ if self.tcx.sess.is_sanitizer_cfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
+ if let Some(fn_attrs) = fn_attrs && fn_attrs.no_sanitize.contains(SanitizerSet::CFI) {
return;
}
@@ -1556,7 +1526,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
}
- let typeid = typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options);
+ let typeid = typeid_for_fnabi(self.tcx, fn_abi, options);
let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();
// Test whether the function pointer is associated with the type identifier.
@@ -1580,25 +1550,26 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: &'ll Value,
) -> Option<llvm::OperandBundleDef<'ll>> {
- let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
- let kcfi_bundle = if is_indirect_call && self.tcx.sess.is_sanitizer_kcfi_enabled() {
- if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::KCFI) {
- return None;
- }
+ let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
+ let kcfi_bundle =
+ if self.tcx.sess.is_sanitizer_kcfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
+ if let Some(fn_attrs) = fn_attrs && fn_attrs.no_sanitize.contains(SanitizerSet::KCFI) {
+ return None;
+ }
- let mut options = TypeIdOptions::empty();
- if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
- options.insert(TypeIdOptions::GENERALIZE_POINTERS);
- }
- if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
- options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
- }
+ let mut options = TypeIdOptions::empty();
+ if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+ options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+ }
+ if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+ options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+ }
- let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options);
- Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
- } else {
- None
- };
+ let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options);
+ Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+ } else {
+ None
+ };
kcfi_bundle
}
}
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 4b9ca2e7d..36c098218 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -4,13 +4,11 @@
//! and methods are represented as just a fn ptr and not a full
//! closure.
-use crate::abi::FnAbiLlvmExt;
use crate::attributes;
use crate::common;
use crate::context::CodegenCx;
use crate::llvm;
use crate::value::Value;
-use rustc_codegen_ssa::traits::*;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
use rustc_middle::ty::{self, Instance, TypeVisitableExt};
@@ -27,8 +25,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
debug!("get_fn(instance={:?})", instance);
- assert!(!instance.substs.has_infer());
- assert!(!instance.substs.has_escaping_bound_vars());
+ assert!(!instance.args.has_infer());
+ assert!(!instance.args.has_escaping_bound_vars());
if let Some(&llfn) = cx.instances.borrow().get(&instance) {
return llfn;
@@ -45,39 +43,7 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
let llfn = if let Some(llfn) = cx.get_declared_value(sym) {
- // Create a fn pointer with the new signature.
- let llptrty = fn_abi.ptr_to_llvm_type(cx);
-
- // This is subtle and surprising, but sometimes we have to bitcast
- // the resulting fn pointer. The reason has to do with external
- // functions. If you have two crates that both bind the same C
- // library, they may not use precisely the same types: for
- // example, they will probably each declare their own structs,
- // which are distinct types from LLVM's point of view (nominal
- // types).
- //
- // Now, if those two crates are linked into an application, and
- // they contain inlined code, you can wind up with a situation
- // where both of those functions wind up being loaded into this
- // application simultaneously. In that case, the same function
- // (from LLVM's point of view) requires two types. But of course
- // LLVM won't allow one function to have two types.
- //
- // What we currently do, therefore, is declare the function with
- // one of the two types (whichever happens to come first) and then
- // bitcast as needed when the function is referenced to make sure
- // it has the type we expect.
- //
- // This can occur on either a crate-local or crate-external
- // reference. It also occurs when testing libcore and in some
- // other weird situations. Annoying.
- if cx.val_ty(llfn) != llptrty {
- debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
- cx.const_ptrcast(llfn, llptrty)
- } else {
- debug!("get_fn: not casting pointer!");
- llfn
- }
+ llfn
} else {
let instance_def_id = instance.def_id();
let llfn = if tcx.sess.target.arch == "x86" &&
@@ -129,7 +95,7 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
unsafe {
llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
- let is_generic = instance.substs.non_erasable_generics().next().is_some();
+ let is_generic = instance.args.non_erasable_generics().next().is_some();
if is_generic {
// This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index a2db59bd6..0b0816c27 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -1,10 +1,9 @@
//! Code that is useful in various codegen modules.
-use crate::consts::{self, const_alloc_to_llvm};
+use crate::consts::const_alloc_to_llvm;
pub use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
use crate::type_::Type;
-use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_ast::Mutability;
@@ -13,7 +12,6 @@ use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
use rustc_hir::def_id::DefId;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::TyCtxt;
use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType};
use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer};
@@ -211,11 +209,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
})
.1;
let len = s.len();
- let cs = consts::ptrcast(
- str_global,
- self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)),
- );
- (cs, self.const_usize(len as u64))
+ (str_global, self.const_usize(len as u64))
}
fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
@@ -290,9 +284,9 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
};
let llval = unsafe {
- llvm::LLVMRustConstInBoundsGEP2(
+ llvm::LLVMConstInBoundsGEP2(
self.type_i8(),
- self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
+ self.const_bitcast(base_addr, self.type_ptr_ext(base_addr_space)),
&self.const_usize(offset.bytes()),
1,
)
@@ -310,19 +304,15 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
const_alloc_to_llvm(self, alloc)
}
- fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
- consts::ptrcast(val, ty)
- }
-
fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
self.const_bitcast(val, ty)
}
fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {
unsafe {
- llvm::LLVMRustConstInBoundsGEP2(
+ llvm::LLVMConstInBoundsGEP2(
self.type_i8(),
- self.const_bitcast(base_addr, self.type_i8p()),
+ base_addr,
&self.const_usize(offset.bytes()),
1,
)
@@ -420,10 +410,10 @@ pub(crate) fn i686_decorated_name(
DllCallingConvention::C => {}
DllCallingConvention::Stdcall(arg_list_size)
| DllCallingConvention::Fastcall(arg_list_size) => {
- write!(&mut decorated_name, "@{}", arg_list_size).unwrap();
+ write!(&mut decorated_name, "@{arg_list_size}").unwrap();
}
DllCallingConvention::Vectorcall(arg_list_size) => {
- write!(&mut decorated_name, "@@{}", arg_list_size).unwrap();
+ write!(&mut decorated_name, "@@{arg_list_size}").unwrap();
}
}
}
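A worked expansion of the suffix formatting in the hunk above; the base name `foo` and an argument list of three 4-byte values are assumptions, and only the suffix written by this code is shown.

use std::fmt::Write;

fn main() {
    // Assumption: a stdcall function taking three 4-byte arguments on 32-bit x86.
    let arg_list_size = 3 * 4;
    let mut decorated_name = String::from("foo");
    write!(&mut decorated_name, "@{arg_list_size}").unwrap();
    assert_eq!(decorated_name, "foo@12");
    // A vectorcall function would get "@@12" appended instead.
}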
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index df52f50f8..95af2f8ef 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -103,7 +103,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
value: Primitive::Pointer(address_space),
valid_range: WrappingRange::full(dl.pointer_size),
},
- cx.type_i8p_ext(address_space),
+ cx.type_ptr_ext(address_space),
));
next_offset = offset + pointer_size;
}
@@ -179,7 +179,7 @@ fn check_and_apply_linkage<'ll, 'tcx>(
})
});
llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
- llvm::LLVMSetInitializer(g2, cx.const_ptrcast(g1, llty));
+ llvm::LLVMSetInitializer(g2, g1);
g2
}
} else if cx.tcx.sess.target.arch == "x86" &&
@@ -193,10 +193,6 @@ fn check_and_apply_linkage<'ll, 'tcx>(
}
}
-pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
- unsafe { llvm::LLVMConstPointerCast(val, ty) }
-}
-
impl<'ll> CodegenCx<'ll, '_> {
pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMConstBitCast(val, ty) }
@@ -238,8 +234,7 @@ impl<'ll> CodegenCx<'ll, '_> {
assert!(
!defined_in_current_codegen_unit,
"consts::get_static() should always hit the cache for \
- statics defined in the same CGU, but did not for `{:?}`",
- def_id
+ statics defined in the same CGU, but did not for `{def_id:?}`"
);
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
@@ -251,7 +246,7 @@ impl<'ll> CodegenCx<'ll, '_> {
let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
let llty = self.layout_of(ty).llvm_type(self);
if let Some(g) = self.get_declared_value(sym) {
- if self.val_ty(g) != self.type_ptr_to(llty) {
+ if self.val_ty(g) != self.type_ptr() {
span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
}
}
@@ -552,16 +547,14 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
}
}
- /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+ /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr.
fn add_used_global(&self, global: &'ll Value) {
- let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
- self.used_statics.borrow_mut().push(cast);
+ self.used_statics.borrow_mut().push(global);
}
/// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
- /// an array of i8*.
+ /// an array of ptr.
fn add_compiler_used_global(&self, global: &'ll Value) {
- let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
- self.compiler_used_statics.borrow_mut().push(cast);
+ self.compiler_used_statics.borrow_mut().push(global);
}
}
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index e1e0a4428..24fd5bbf8 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -33,6 +33,7 @@ use rustc_target::abi::{
use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
use smallvec::SmallVec;
+use libc::c_uint;
use std::cell::{Cell, RefCell};
use std::ffi::CStr;
use std::str;
@@ -58,17 +59,6 @@ pub struct CodegenCx<'ll, 'tcx> {
/// Cache of constant strings,
pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,
- /// Reverse-direction for const ptrs cast from globals.
- ///
- /// Key is a Value holding a `*T`,
- /// Val is a Value holding a `*[T]`.
- ///
- /// Needed because LLVM loses pointer->pointee association
- /// when we ptrcast, and we have to ptrcast during codegen
- /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
- /// a pointer to an LLVM array type. Similar for trait objects.
- pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
-
/// Cache of emitted const globals (value -> global)
pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
@@ -155,6 +145,17 @@ pub unsafe fn create_module<'ll>(
target_data_layout = target_data_layout.replace("-n32:64-", "-n64-");
}
}
+ if llvm_version < (17, 0, 0) {
+ if sess.target.arch.starts_with("powerpc") {
+ // LLVM 17 specifies function pointer alignment for ppc:
+ // https://reviews.llvm.org/D147016
+ target_data_layout = target_data_layout
+ .replace("-Fn32", "")
+ .replace("-Fi32", "")
+ .replace("-Fn64", "")
+ .replace("-Fi64", "");
+ }
+ }
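A hedged illustration of the effect of the replacements just added; the data-layout string below is an assumption, not taken from a real target spec, and only shows the function-pointer-alignment markers being stripped for pre-17 LLVM.

fn main() {
    let mut target_data_layout = String::from("e-m:e-Fn32-i64:64-n32:64-S128");
    target_data_layout = target_data_layout
        .replace("-Fn32", "")
        .replace("-Fi32", "")
        .replace("-Fn64", "")
        .replace("-Fi64", "");
    assert_eq!(target_data_layout, "e-m:e-i64:64-n32:64-S128");
}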
// Ensure the data-layout values hardcoded remain the defaults.
if sess.target.is_builtin {
@@ -208,7 +209,7 @@ pub unsafe fn create_module<'ll>(
// PIE is potentially more effective than PIC, but can only be used in executables.
// If all our outputs are executables, then we can relax PIC to PIE.
if reloc_model == RelocModel::Pie
- || sess.crate_types().iter().all(|ty| *ty == CrateType::Executable)
+ || tcx.crate_types().iter().all(|ty| *ty == CrateType::Executable)
{
llvm::LLVMRustSetModulePIELevel(llmod);
}
@@ -349,6 +350,23 @@ pub unsafe fn create_module<'ll>(
);
}
+ // Insert `llvm.ident` metadata.
+ //
+ // On the wasm targets it will get hooked up to the "producer" sections
+ // `processed-by` information.
+ let rustc_producer =
+ format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"));
+ let name_metadata = llvm::LLVMMDStringInContext(
+ llcx,
+ rustc_producer.as_ptr().cast(),
+ rustc_producer.as_bytes().len() as c_uint,
+ );
+ llvm::LLVMAddNamedMetadataOperand(
+ llmod,
+ cstr!("llvm.ident").as_ptr(),
+ llvm::LLVMMDNodeInContext(llcx, &name_metadata, 1),
+ );
+
llmod
}
@@ -446,7 +464,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
instances: Default::default(),
vtables: Default::default(),
const_str_cache: Default::default(),
- const_unsized: Default::default(),
const_globals: Default::default(),
statics_to_rauw: RefCell::new(Vec::new()),
used_statics: RefCell::new(Vec::new()),
@@ -477,7 +494,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
let section = cstr!("llvm.metadata");
- let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
+ let array = self.const_array(self.type_ptr(), values);
unsafe {
let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
@@ -655,7 +672,7 @@ impl<'ll> CodegenCx<'ll, '_> {
($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
}
- let i8p = self.type_i8p();
+ let ptr = self.type_ptr();
let void = self.type_void();
let i1 = self.type_i1();
let t_i8 = self.type_i8();
@@ -669,7 +686,7 @@ impl<'ll> CodegenCx<'ll, '_> {
let t_metadata = self.type_metadata();
let t_token = self.type_token();
- ifn!("llvm.wasm.get.exception", fn(t_token) -> i8p);
+ ifn!("llvm.wasm.get.exception", fn(t_token) -> ptr);
ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);
ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
@@ -705,7 +722,7 @@ impl<'ll> CodegenCx<'ll, '_> {
ifn!("llvm.trap", fn() -> void);
ifn!("llvm.debugtrap", fn() -> void);
- ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
+ ifn!("llvm.frameaddress", fn(t_i32) -> ptr);
ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
@@ -872,43 +889,44 @@ impl<'ll> CodegenCx<'ll, '_> {
ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
- ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
- ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
+ ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void);
+ ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void);
ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
- ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
+ ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32);
ifn!("llvm.localescape", fn(...) -> void);
- ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
- ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
+ ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr);
+ ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr);
ifn!("llvm.assume", fn(i1) -> void);
- ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
+ ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void);
// This isn't an "LLVM intrinsic", but LLVM's optimization passes
- // recognize it like one and we assume it exists in `core::slice::cmp`
+ // recognize it like one (including turning it into `bcmp` sometimes)
+ // and we use it to implement intrinsics like `raw_eq` and `compare_bytes`
match self.sess().target.arch.as_ref() {
- "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16),
- _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32),
+ "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16),
+ _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32),
}
// variadic intrinsics
- ifn!("llvm.va_start", fn(i8p) -> void);
- ifn!("llvm.va_end", fn(i8p) -> void);
- ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
+ ifn!("llvm.va_start", fn(ptr) -> void);
+ ifn!("llvm.va_end", fn(ptr) -> void);
+ ifn!("llvm.va_copy", fn(ptr, ptr) -> void);
if self.sess().instrument_coverage() {
- ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
+ ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void);
}
- ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1);
- ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1});
+ ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1);
+ ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1});
if self.sess().opts.debuginfo != DebugInfo::None {
ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
}
- ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p);
+ ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr);
None
}
@@ -922,12 +940,10 @@ impl<'ll> CodegenCx<'ll, '_> {
let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
Some(def_id) => self.get_static(def_id),
_ => {
- let ty = self
- .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
+ let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false);
self.declare_global("rust_eh_catch_typeinfo", ty)
}
};
- let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
eh_catch_typeinfo
}
@@ -981,7 +997,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
#[inline]
fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
- if let LayoutError::SizeOverflow(_) = err {
+ if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() })
} else {
span_bug!(span, "failed to get layout for `{ty}`: {err:?}")
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index 1791ce4b3..7a82d05ce 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -1,4 +1,4 @@
-use rustc_middle::mir::coverage::{CounterValueReference, MappedExpressionIndex};
+use rustc_middle::mir::coverage::{CounterId, MappedExpressionIndex};
/// Must match the layout of `LLVMRustCounterKind`.
#[derive(Copy, Clone, Debug)]
@@ -36,11 +36,9 @@ impl Counter {
Self { kind: CounterKind::Zero, id: 0 }
}
- /// Constructs a new `Counter` of kind `CounterValueReference`, and converts
- /// the given 1-based counter_id to the required 0-based equivalent for
- /// the `Counter` encoding.
- pub fn counter_value_reference(counter_id: CounterValueReference) -> Self {
- Self { kind: CounterKind::CounterValueReference, id: counter_id.zero_based_index() }
+ /// Constructs a new `Counter` of kind `CounterValueReference`.
+ pub fn counter_value_reference(counter_id: CounterId) -> Self {
+ Self { kind: CounterKind::CounterValueReference, id: counter_id.as_u32() }
}
/// Constructs a new `Counter` of kind `Expression`.
@@ -87,3 +85,197 @@ impl CounterExpression {
Self { kind, lhs, rhs }
}
}
+
+/// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`.
+///
+/// Must match the layout of `LLVMRustCounterMappingRegionKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum RegionKind {
+ /// A CodeRegion associates some code with a counter
+ CodeRegion = 0,
+
+ /// An ExpansionRegion represents a file expansion region that associates
+ /// a source range with the expansion of a virtual source file, such as
+ /// for a macro instantiation or #include file.
+ ExpansionRegion = 1,
+
+ /// A SkippedRegion represents a source range with code that was skipped
+ /// by a preprocessor or similar means.
+ SkippedRegion = 2,
+
+ /// A GapRegion is like a CodeRegion, but its count is only set as the
+ /// line execution count when it's the only region in the line.
+ GapRegion = 3,
+
+ /// A BranchRegion represents leaf-level boolean expressions and is
+ /// associated with two counters, each representing the number of times the
+ /// expression evaluates to true or false.
+ BranchRegion = 4,
+}
+
+/// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
+/// coverage map, in accordance with the
+/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+/// The struct composes fields representing the `Counter` type and value(s) (injected counter
+/// ID, or expression type and operands), the source file (an indirect index into a "filenames
+/// array", encoded separately), and source location (start and end positions of the represented
+/// code region).
+///
+/// Corresponds to struct `llvm::coverage::CounterMappingRegion`.
+///
+/// Must match the layout of `LLVMRustCounterMappingRegion`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterMappingRegion {
+ /// The counter type and type-dependent counter data, if any.
+ counter: Counter,
+
+ /// If the `RegionKind` is a `BranchRegion`, this represents the counter
+ /// for the false branch of the region.
+ false_counter: Counter,
+
+ /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
+ /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
+ /// that, in turn, are used to look up the filename for this region.
+ file_id: u32,
+
+ /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
+ /// the mapping regions created as a result of macro expansion, by checking if their file id
+ /// matches the expanded file id.
+ expanded_file_id: u32,
+
+ /// 1-based starting line of the mapping region.
+ start_line: u32,
+
+ /// 1-based starting column of the mapping region.
+ start_col: u32,
+
+ /// 1-based ending line of the mapping region.
+ end_line: u32,
+
+ /// 1-based ending column of the mapping region. If the high bit is set, the current
+ /// mapping region is a gap area.
+ end_col: u32,
+
+ kind: RegionKind,
+}
+
+impl CounterMappingRegion {
+ pub(crate) fn code_region(
+ counter: Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ false_counter: Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::CodeRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn branch_region(
+ counter: Counter,
+ false_counter: Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ false_counter,
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::BranchRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn expansion_region(
+ file_id: u32,
+ expanded_file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter: Counter::zero(),
+ false_counter: Counter::zero(),
+ file_id,
+ expanded_file_id,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::ExpansionRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn skipped_region(
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter: Counter::zero(),
+ false_counter: Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::SkippedRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn gap_region(
+ counter: Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ false_counter: Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col: (1_u32 << 31) | end_col,
+ kind: RegionKind::GapRegion,
+ }
+ }
+}
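A worked example of the gap-area encoding used in `gap_region` above (the column value 42 is an assumption): the high bit of `end_col` flags the region as a gap area while the original column stays recoverable.

fn main() {
    let end_col: u32 = 42;
    let encoded = (1_u32 << 31) | end_col;
    assert_eq!(encoded, 0x8000_002A);
    assert!(encoded & (1 << 31) != 0);    // high bit set => gap region
    assert_eq!(encoded & !(1 << 31), 42); // original column still recoverable
}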
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index 06844afd6..f1e68af25 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -1,26 +1,24 @@
-pub use super::ffi::*;
+use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::bug;
use rustc_middle::mir::coverage::{
- CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId,
- InjectedExpressionIndex, MappedExpressionIndex, Op,
+ CodeRegion, CounterId, ExpressionId, MappedExpressionIndex, Op, Operand,
};
use rustc_middle::ty::Instance;
use rustc_middle::ty::TyCtxt;
#[derive(Clone, Debug, PartialEq)]
pub struct Expression {
- lhs: ExpressionOperandId,
+ lhs: Operand,
op: Op,
- rhs: ExpressionOperandId,
+ rhs: Operand,
region: Option<CodeRegion>,
}
/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
/// expressions (addition or subtraction), and (c) unreachable regions (always counted as zero),
-/// for a given Function. Counters and counter expressions have non-overlapping `id`s because they
-/// can both be operands in an expression. This struct also stores the `function_source_hash`,
+/// for a given Function. This struct also stores the `function_source_hash`,
/// computed during instrumentation, and forwarded with counters.
///
/// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap
@@ -34,8 +32,8 @@ pub struct FunctionCoverage<'tcx> {
instance: Instance<'tcx>,
source_hash: u64,
is_used: bool,
- counters: IndexVec<CounterValueReference, Option<CodeRegion>>,
- expressions: IndexVec<InjectedExpressionIndex, Option<Expression>>,
+ counters: IndexVec<CounterId, Option<CodeRegion>>,
+ expressions: IndexVec<ExpressionId, Option<Expression>>,
unreachable_regions: Vec<CodeRegion>,
}
@@ -82,48 +80,36 @@ impl<'tcx> FunctionCoverage<'tcx> {
}
/// Adds a code region to be counted by an injected counter intrinsic.
- pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) {
+ pub fn add_counter(&mut self, id: CounterId, region: CodeRegion) {
if let Some(previous_region) = self.counters[id].replace(region.clone()) {
assert_eq!(previous_region, region, "add_counter: code region for id changed");
}
}
/// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
- /// expressions. Expression IDs start from `u32::MAX` and go down, so the range of expression
- /// IDs will not overlap with the range of counter IDs. Counters and expressions can be added in
- /// any order, and expressions can still be assigned contiguous (though descending) IDs, without
- /// knowing what the last counter ID will be.
- ///
- /// When storing the expression data in the `expressions` vector in the `FunctionCoverage`
- /// struct, its vector index is computed, from the given expression ID, by subtracting from
- /// `u32::MAX`.
- ///
- /// Since the expression operands (`lhs` and `rhs`) can reference either counters or
- /// expressions, an operand that references an expression also uses its original ID, descending
- /// from `u32::MAX`. Theses operands are translated only during code generation, after all
- /// counters and expressions have been added.
+ /// expressions. These are tracked as separate variants of `Operand`, so there is no ambiguity
+ /// between operands that are counter IDs and operands that are expression IDs.
pub fn add_counter_expression(
&mut self,
- expression_id: InjectedExpressionId,
- lhs: ExpressionOperandId,
+ expression_id: ExpressionId,
+ lhs: Operand,
op: Op,
- rhs: ExpressionOperandId,
+ rhs: Operand,
region: Option<CodeRegion>,
) {
debug!(
"add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
expression_id, lhs, op, rhs, region
);
- let expression_index = self.expression_index(u32::from(expression_id));
debug_assert!(
- expression_index.as_usize() < self.expressions.len(),
- "expression_index {} is out of range for expressions.len() = {}
+ expression_id.as_usize() < self.expressions.len(),
+ "expression_id {} is out of range for expressions.len() = {}
for {:?}",
- expression_index.as_usize(),
+ expression_id.as_usize(),
self.expressions.len(),
self,
);
- if let Some(previous_expression) = self.expressions[expression_index].replace(Expression {
+ if let Some(previous_expression) = self.expressions[expression_id].replace(Expression {
lhs,
op,
rhs,
@@ -186,14 +172,11 @@ impl<'tcx> FunctionCoverage<'tcx> {
// This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
// `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
- // and value. Operand ID value `0` maps to `CounterKind::Zero`; values in the known range
- // of injected LLVM counters map to `CounterKind::CounterValueReference` (and the value
- // matches the injected counter index); and any other value is converted into a
- // `CounterKind::Expression` with the expression's `new_index`.
+ // and value.
//
// Expressions will be returned from this function in a sequential vector (array) of
// `CounterExpression`, so the expression IDs must be mapped from their original,
- // potentially sparse set of indexes, originally in reverse order from `u32::MAX`.
+ // potentially sparse set of indexes.
//
// An `Expression` as an operand will have already been encountered as an `Expression` with
// operands, so its new_index will already have been generated (as a 1-up index value).
@@ -206,34 +189,19 @@ impl<'tcx> FunctionCoverage<'tcx> {
// `expression_index`s lower than the referencing `Expression`. Therefore, it is
// reasonable to look up the new index of an expression operand while the `new_indexes`
// vector is only complete up to the current `ExpressionIndex`.
- let id_to_counter = |new_indexes: &IndexSlice<
- InjectedExpressionIndex,
- Option<MappedExpressionIndex>,
- >,
- id: ExpressionOperandId| {
- if id == ExpressionOperandId::ZERO {
- Some(Counter::zero())
- } else if id.index() < self.counters.len() {
- debug_assert!(
- id.index() > 0,
- "ExpressionOperandId indexes for counters are 1-based, but this id={}",
- id.index()
- );
- // Note: Some codegen-injected Counters may be only referenced by `Expression`s,
- // and may not have their own `CodeRegion`s,
- let index = CounterValueReference::from(id.index());
- // Note, the conversion to LLVM `Counter` adjusts the index to be zero-based.
- Some(Counter::counter_value_reference(index))
- } else {
- let index = self.expression_index(u32::from(id));
+ type NewIndexes = IndexSlice<ExpressionId, Option<MappedExpressionIndex>>;
+ let id_to_counter = |new_indexes: &NewIndexes, operand: Operand| match operand {
+ Operand::Zero => Some(Counter::zero()),
+ Operand::Counter(id) => Some(Counter::counter_value_reference(id)),
+ Operand::Expression(id) => {
self.expressions
- .get(index)
+ .get(id)
.expect("expression id is out of range")
.as_ref()
// If an expression was optimized out, assume it would have produced a count
// of zero. This ensures that expressions dependent on optimized-out
// expressions are still valid.
- .map_or(Some(Counter::zero()), |_| new_indexes[index].map(Counter::expression))
+ .map_or(Some(Counter::zero()), |_| new_indexes[id].map(Counter::expression))
}
};
@@ -340,9 +308,4 @@ impl<'tcx> FunctionCoverage<'tcx> {
fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
}
-
- fn expression_index(&self, id_descending_from_max: u32) -> InjectedExpressionIndex {
- debug_assert!(id_descending_from_max >= self.counters.len() as u32);
- InjectedExpressionIndex::from(u32::MAX - id_descending_from_max)
- }
}
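For contrast with the `Operand` variants introduced above, a small sketch of the retired ID scheme that this hunk deletes (the counts are assumptions): expression IDs used to descend from `u32::MAX` so they could share one numeric space with counter IDs, and the vector index was recovered by subtraction.

fn main() {
    let id_descending_from_max = u32::MAX - 2; // the third expression created
    let expression_index = u32::MAX - id_descending_from_max;
    assert_eq!(expression_index, 2);
    // After this patch, Operand::Counter(id) and Operand::Expression(id) are
    // separate enum variants, so no subtraction or range check is needed.
}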
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index a1ff2aa66..97a99e510 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,9 +1,8 @@
use crate::common::CodegenCx;
use crate::coverageinfo;
-use crate::coverageinfo::map_data::{Counter, CounterExpression};
+use crate::coverageinfo::ffi::{Counter, CounterExpression, CounterMappingRegion};
use crate::llvm;
-use llvm::coverageinfo::CounterMappingRegion;
use rustc_codegen_ssa::traits::ConstMethods;
use rustc_data_structures::fx::FxIndexSet;
use rustc_hir::def::DefKind;
@@ -13,8 +12,7 @@ use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::coverage::CodeRegion;
use rustc_middle::ty::TyCtxt;
-
-use std::ffi::CString;
+use rustc_span::Symbol;
/// Generates and exports the Coverage Map.
///
@@ -63,7 +61,7 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
let mut function_data = Vec::new();
for (instance, function_coverage) in function_coverage_map {
debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
- let mangled_function_name = tcx.symbol_name(instance).to_string();
+ let mangled_function_name = tcx.symbol_name(instance).name;
let source_hash = function_coverage.source_hash();
let is_used = function_coverage.is_used();
let (expressions, counter_regions) =
@@ -90,19 +88,24 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
// Encode all filenames referenced by counters/expressions in this module
let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
- coverageinfo::write_filenames_section_to_buffer(&mapgen.filenames, filenames_buffer);
+ coverageinfo::write_filenames_section_to_buffer(
+ mapgen.filenames.iter().map(Symbol::as_str),
+ filenames_buffer,
+ );
});
let filenames_size = filenames_buffer.len();
let filenames_val = cx.const_bytes(&filenames_buffer);
- let filenames_ref = coverageinfo::hash_bytes(filenames_buffer);
+ let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
// Generate the LLVM IR representation of the coverage map and store it in a well-known global
let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
+ let covfun_section_name = coverageinfo::covfun_section_name(cx);
for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
save_function_record(
cx,
+ &covfun_section_name,
mangled_function_name,
source_hash,
filenames_ref,
@@ -116,7 +119,7 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
}
struct CoverageMapGenerator {
- filenames: FxIndexSet<CString>,
+ filenames: FxIndexSet<Symbol>,
}
impl CoverageMapGenerator {
@@ -127,11 +130,10 @@ impl CoverageMapGenerator {
// Since rustc generates coverage maps with relative paths, the
// compilation directory can be combined with the relative paths
// to get absolute paths, if needed.
- let working_dir =
- tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy().to_string();
- let c_filename =
- CString::new(working_dir).expect("null error converting filename to C string");
- filenames.insert(c_filename);
+ let working_dir = Symbol::intern(
+ &tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy(),
+ );
+ filenames.insert(working_dir);
Self { filenames }
}
@@ -169,10 +171,8 @@ impl CoverageMapGenerator {
current_file_id += 1;
}
current_file_name = Some(file_name);
- let c_filename = CString::new(file_name.to_string())
- .expect("null error converting filename to C string");
- debug!(" file_id: {} = '{:?}'", current_file_id, c_filename);
- let (filenames_index, _) = self.filenames.insert_full(c_filename);
+ debug!(" file_id: {} = '{:?}'", current_file_id, file_name);
+ let (filenames_index, _) = self.filenames.insert_full(file_name);
virtual_file_mapping.push(filenames_index as u32);
}
debug!("Adding counter {:?} to map for {:?}", counter, region);
@@ -228,7 +228,8 @@ impl CoverageMapGenerator {
/// specific, well-known section and name.
fn save_function_record(
cx: &CodegenCx<'_, '_>,
- mangled_function_name: String,
+ covfun_section_name: &str,
+ mangled_function_name: &str,
source_hash: u64,
filenames_ref: u64,
coverage_mapping_buffer: Vec<u8>,
@@ -238,7 +239,7 @@ fn save_function_record(
let coverage_mapping_size = coverage_mapping_buffer.len();
let coverage_mapping_val = cx.const_bytes(&coverage_mapping_buffer);
- let func_name_hash = coverageinfo::hash_str(&mangled_function_name);
+ let func_name_hash = coverageinfo::hash_bytes(mangled_function_name.as_bytes());
let func_name_hash_val = cx.const_u64(func_name_hash);
let coverage_mapping_size_val = cx.const_u32(coverage_mapping_size as u32);
let source_hash_val = cx.const_u64(source_hash);
@@ -254,7 +255,13 @@ fn save_function_record(
/*packed=*/ true,
);
- coverageinfo::save_func_record_to_mod(cx, func_name_hash, func_record_val, is_used);
+ coverageinfo::save_func_record_to_mod(
+ cx,
+ covfun_section_name,
+ func_name_hash,
+ func_record_val,
+ is_used,
+ );
}
/// When finalizing the coverage map, `FunctionCoverage` only has the `CodeRegion`s and counters for
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 42fdbd786..621fd36b2 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -3,10 +3,10 @@ use crate::llvm;
use crate::abi::Abi;
use crate::builder::Builder;
use crate::common::CodegenCx;
-use crate::coverageinfo::map_data::{CounterExpression, FunctionCoverage};
+use crate::coverageinfo::ffi::{CounterExpression, CounterMappingRegion};
+use crate::coverageinfo::map_data::FunctionCoverage;
use libc::c_uint;
-use llvm::coverageinfo::CounterMappingRegion;
use rustc_codegen_ssa::traits::{
BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, MiscMethods,
StaticMethods,
@@ -16,24 +16,21 @@ use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_llvm::RustString;
use rustc_middle::bug;
-use rustc_middle::mir::coverage::{
- CodeRegion, CounterValueReference, CoverageKind, ExpressionOperandId, InjectedExpressionId, Op,
-};
+use rustc_middle::mir::coverage::{CodeRegion, CounterId, CoverageKind, ExpressionId, Op, Operand};
use rustc_middle::mir::Coverage;
use rustc_middle::ty;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
-use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::Instance;
use rustc_middle::ty::Ty;
use std::cell::RefCell;
-use std::ffi::CString;
-mod ffi;
+pub(crate) mod ffi;
pub(crate) mod map_data;
pub mod mapgen;
-const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
+const UNUSED_FUNCTION_COUNTER_ID: CounterId = CounterId::START;
const VAR_ALIGN_BYTES: usize = 8;
@@ -125,7 +122,7 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
let fn_name = bx.get_pgo_func_name_var(instance);
let hash = bx.const_u64(function_source_hash);
let num_counters = bx.const_u32(coverageinfo.num_counters);
- let index = bx.const_u32(id.zero_based_index());
+ let index = bx.const_u32(id.as_u32());
debug!(
"codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
fn_name, hash, num_counters, index,
@@ -178,7 +175,7 @@ impl<'tcx> Builder<'_, '_, 'tcx> {
fn add_coverage_counter(
&mut self,
instance: Instance<'tcx>,
- id: CounterValueReference,
+ id: CounterId,
region: CodeRegion,
) -> bool {
if let Some(coverage_context) = self.coverage_context() {
@@ -202,10 +199,10 @@ impl<'tcx> Builder<'_, '_, 'tcx> {
fn add_coverage_counter_expression(
&mut self,
instance: Instance<'tcx>,
- id: InjectedExpressionId,
- lhs: ExpressionOperandId,
+ id: ExpressionId,
+ lhs: Operand,
op: Op,
- rhs: ExpressionOperandId,
+ rhs: Operand,
region: Option<CodeRegion>,
) -> bool {
if let Some(coverage_context) = self.coverage_context() {
@@ -250,7 +247,7 @@ fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<
let instance = Instance::new(
def_id,
- InternalSubsts::for_item(tcx, def_id, |param, _| {
+ GenericArgs::for_item(tcx, def_id, |param, _| {
if let ty::GenericParamDefKind::Lifetime = param.kind {
tcx.lifetimes.re_erased.into()
} else {
@@ -334,21 +331,32 @@ fn create_pgo_func_name_var<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
) -> &'ll llvm::Value {
- let mangled_fn_name = CString::new(cx.tcx.symbol_name(instance).name)
- .expect("error converting function name to C string");
+ let mangled_fn_name: &str = cx.tcx.symbol_name(instance).name;
let llfn = cx.get_fn(instance);
- unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
+ unsafe {
+ llvm::LLVMRustCoverageCreatePGOFuncNameVar(
+ llfn,
+ mangled_fn_name.as_ptr().cast(),
+ mangled_fn_name.len(),
+ )
+ }
}
pub(crate) fn write_filenames_section_to_buffer<'a>(
- filenames: impl IntoIterator<Item = &'a CString>,
+ filenames: impl IntoIterator<Item = &'a str>,
buffer: &RustString,
) {
- let c_str_vec = filenames.into_iter().map(|cstring| cstring.as_ptr()).collect::<Vec<_>>();
+ let (pointers, lengths) = filenames
+ .into_iter()
+ .map(|s: &str| (s.as_ptr().cast(), s.len()))
+ .unzip::<_, _, Vec<_>, Vec<_>>();
+
unsafe {
llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
- c_str_vec.as_ptr(),
- c_str_vec.len(),
+ pointers.as_ptr(),
+ pointers.len(),
+ lengths.as_ptr(),
+ lengths.len(),
buffer,
);
}
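A small sketch of the unzip pattern used above; the file names are assumptions. One pass over the iterator yields the parallel pointer and length vectors that the FFI call consumes.

fn main() {
    let filenames = ["lib.rs", "main.rs"];
    let (pointers, lengths): (Vec<*const u8>, Vec<usize>) =
        filenames.iter().map(|s| (s.as_ptr(), s.len())).unzip();
    assert_eq!(lengths, vec![6, 7]);
    assert_eq!(pointers.len(), lengths.len());
}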
@@ -373,12 +381,7 @@ pub(crate) fn write_mapping_to_buffer(
}
}
-pub(crate) fn hash_str(strval: &str) -> u64 {
- let strval = CString::new(strval).expect("null error converting hashable str to C string");
- unsafe { llvm::LLVMRustCoverageHashCString(strval.as_ptr()) }
-}
-
-pub(crate) fn hash_bytes(bytes: Vec<u8>) -> u64 {
+pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 {
unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_ptr().cast(), bytes.len()) }
}
@@ -413,6 +416,7 @@ pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
+ covfun_section_name: &str,
func_name_hash: u64,
func_record_val: &'ll llvm::Value,
is_used: bool,
@@ -428,20 +432,33 @@ pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
let func_record_var_name =
format!("__covrec_{:X}{}", func_name_hash, if is_used { "u" } else { "" });
debug!("function record var name: {:?}", func_record_var_name);
-
- let func_record_section_name = llvm::build_string(|s| unsafe {
- llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
- })
- .expect("Rust Coverage function record section name failed UTF-8 conversion");
- debug!("function record section name: {:?}", func_record_section_name);
+ debug!("function record section name: {:?}", covfun_section_name);
let llglobal = llvm::add_global(cx.llmod, cx.val_ty(func_record_val), &func_record_var_name);
llvm::set_initializer(llglobal, func_record_val);
llvm::set_global_constant(llglobal, true);
llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
- llvm::set_section(llglobal, &func_record_section_name);
+ llvm::set_section(llglobal, covfun_section_name);
llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
cx.add_used_global(llglobal);
}
+
+/// Returns the section name string to pass through to the linker when embedding
+/// per-function coverage information in the object file, according to the target
+/// platform's object file format.
+///
+/// LLVM's coverage tools read coverage mapping details from this section when
+/// producing coverage reports.
+///
+/// Typical values are:
+/// - `__llvm_covfun` on Linux
+/// - `__LLVM_COV,__llvm_covfun` on macOS (includes `__LLVM_COV,` segment prefix)
+/// - `.lcovfun$M` on Windows (includes `$M` sorting suffix)
+pub(crate) fn covfun_section_name(cx: &CodegenCx<'_, '_>) -> String {
+ llvm::build_string(|s| unsafe {
+ llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
+ })
+ .expect("Rust Coverage function record section name failed UTF-8 conversion")
+}
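A hedged sketch of the typical values named in the doc comment above, keyed by platform for orientation only; the real string is always produced by LLVM via LLVMRustCoverageWriteFuncSectionNameToString.

fn expected_covfun_section(target_os: &str) -> &'static str {
    match target_os {
        "macos" => "__LLVM_COV,__llvm_covfun",
        "windows" => ".lcovfun$M",
        _ => "__llvm_covfun",
    }
}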
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index 65cbd5edc..d174a3593 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -91,7 +91,7 @@ fn make_mir_scope<'ll, 'tcx>(
// FIXME(eddyb) this would be `self.monomorphize(&callee)`
// if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
let callee = cx.tcx.subst_and_normalize_erasing_regions(
- instance.substs,
+ instance.args,
ty::ParamEnv::reveal_all(),
ty::EarlyBinder::bind(callee),
);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 37f309176..425e935bc 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -17,8 +17,7 @@ use rustc_span::symbol::sym;
/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
if needs_gdb_debug_scripts_section(bx) {
- let gdb_debug_scripts_section =
- bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p());
+ let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
let volatile_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);
@@ -54,7 +53,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '
// The initial byte `4` instructs GDB that the following pretty printer
// is defined inline as opposed to in a standalone file.
section_contents.extend_from_slice(b"\x04");
- let vis_name = format!("pretty-printer-{}-{}\n", crate_name, index);
+ let vis_name = format!("pretty-printer-{crate_name}-{index}\n");
section_contents.extend_from_slice(vis_name.as_bytes());
section_contents.extend_from_slice(&visualizer.src);
@@ -93,7 +92,7 @@ pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
// each rlib could produce a different set of visualizers that would be embedded
// in the `.debug_gdb_scripts` section. For that reason, we make sure that the
// section is only emitted for leaf crates.
- let embed_visualizers = cx.sess().crate_types().iter().any(|&crate_type| match crate_type {
+ let embed_visualizers = cx.tcx.crate_types().iter().any(|&crate_type| match crate_type {
CrateType::Executable | CrateType::Dylib | CrateType::Cdylib | CrateType::Staticlib => {
// These are crate types for which we will embed pretty printers since they
// are treated as leaf crates.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index d61400d3f..f8cbcbd5e 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -184,9 +184,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
debug_assert_eq!(
(data_layout.pointer_size, data_layout.pointer_align.abi),
cx.size_and_align_of(ptr_type),
- "ptr_type={}, pointee_type={}",
- ptr_type,
- pointee_type,
+ "ptr_type={ptr_type}, pointee_type={pointee_type}",
);
let di_node = unsafe {
@@ -449,7 +447,7 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
}
// Box<T, A> may have a non-ZST allocator A. In that case, we
// cannot treat Box<T, A> as just an owned alias of `*mut T`.
- ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+ ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
}
ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
@@ -521,7 +519,7 @@ fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll D
fn hex_encode(data: &[u8]) -> String {
let mut hex_string = String::with_capacity(data.len() * 2);
for byte in data.iter() {
- write!(&mut hex_string, "{:02x}", byte).unwrap();
+ write!(&mut hex_string, "{byte:02x}").unwrap();
}
hex_string
}
@@ -739,7 +737,10 @@ fn build_foreign_type_di_node<'ll, 'tcx>(
debug!("build_foreign_type_di_node: {:?}", t);
let &ty::Foreign(def_id) = unique_type_id.expect_ty().kind() else {
- bug!("build_foreign_type_di_node() called with unexpected type: {:?}", unique_type_id.expect_ty());
+ bug!(
+ "build_foreign_type_di_node() called with unexpected type: {:?}",
+ unique_type_id.expect_ty()
+ );
};
build_type_with_children(
@@ -763,7 +764,7 @@ fn build_param_type_di_node<'ll, 'tcx>(
t: Ty<'tcx>,
) -> DINodeCreationResult<'ll> {
debug!("build_param_type_di_node: {:?}", t);
- let name = format!("{:?}", t);
+ let name = format!("{t:?}");
DINodeCreationResult {
di_node: unsafe {
llvm::LLVMRustDIBuilderCreateBasicType(
@@ -811,7 +812,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
debug!("build_compile_unit_di_node: {:?}", name_in_debuginfo);
let rustc_producer = format!("rustc version {}", tcx.sess.cfg_version);
// FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
- let producer = format!("clang LLVM ({})", rustc_producer);
+ let producer = format!("clang LLVM ({rustc_producer})");
let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
let work_dir = tcx.sess.opts.working_dir.to_string_lossy(FileNameDisplayPreference::Remapped);
@@ -885,21 +886,6 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, llvm_gcov_ident.as_ptr(), val);
}
- // Insert `llvm.ident` metadata on the wasm targets since that will
- // get hooked up to the "producer" sections `processed-by` information.
- if tcx.sess.target.is_like_wasm {
- let name_metadata = llvm::LLVMMDStringInContext(
- debug_context.llcontext,
- rustc_producer.as_ptr().cast(),
- rustc_producer.as_bytes().len() as c_uint,
- );
- llvm::LLVMAddNamedMetadataOperand(
- debug_context.llmod,
- cstr!("llvm.ident").as_ptr(),
- llvm::LLVMMDNodeInContext(debug_context.llcontext, &name_metadata, 1),
- );
- }
-
return unit_metadata;
};
@@ -1004,14 +990,8 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
closure_or_generator_di_node: &'ll DIType,
) -> SmallVec<&'ll DIType> {
let (&def_id, up_var_tys) = match closure_or_generator_ty.kind() {
- ty::Generator(def_id, substs, _) => {
- let upvar_tys: SmallVec<_> = substs.as_generator().prefix_tys().collect();
- (def_id, upvar_tys)
- }
- ty::Closure(def_id, substs) => {
- let upvar_tys: SmallVec<_> = substs.as_closure().upvar_tys().collect();
- (def_id, upvar_tys)
- }
+ ty::Generator(def_id, args, _) => (def_id, args.as_generator().prefix_tys()),
+ ty::Closure(def_id, args) => (def_id, args.as_closure().upvar_tys()),
_ => {
bug!(
"build_upvar_field_di_nodes() called with non-closure-or-generator-type: {:?}",
@@ -1021,9 +1001,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
};
debug_assert!(
- up_var_tys
- .iter()
- .all(|&t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
+ up_var_tys.iter().all(|t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
);
let capture_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
@@ -1099,7 +1077,7 @@ fn build_closure_env_di_node<'ll, 'tcx>(
unique_type_id: UniqueTypeId<'tcx>,
) -> DINodeCreationResult<'ll> {
let closure_env_type = unique_type_id.expect_ty();
- let &ty::Closure(def_id, _substs) = closure_env_type.kind() else {
+ let &ty::Closure(def_id, _args) = closure_env_type.kind() else {
bug!("build_closure_env_di_node() called with non-closure-type: {:?}", closure_env_type)
};
let containing_scope = get_namespace_for_item(cx, def_id);
@@ -1177,11 +1155,11 @@ fn build_generic_type_param_di_nodes<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
ty: Ty<'tcx>,
) -> SmallVec<&'ll DIType> {
- if let ty::Adt(def, substs) = *ty.kind() {
- if substs.types().next().is_some() {
+ if let ty::Adt(def, args) = *ty.kind() {
+ if args.types().next().is_some() {
let generics = cx.tcx.generics_of(def.did());
let names = get_parameter_names(cx, generics);
- let template_params: SmallVec<_> = iter::zip(substs, names)
+ let template_params: SmallVec<_> = iter::zip(args, names)
.filter_map(|(kind, name)| {
kind.as_type().map(|ty| {
let actual_type =
@@ -1343,10 +1321,10 @@ fn build_vtable_type_di_node<'ll, 'tcx>(
// Note: This code does not try to give a proper name to each method
// because there might be multiple methods with the same name
// (coming from different traits).
- (format!("__method{}", index), void_pointer_type_di_node)
+ (format!("__method{index}"), void_pointer_type_di_node)
}
ty::VtblEntry::TraitVPtr(_) => {
- (format!("__super_trait_ptr{}", index), void_pointer_type_di_node)
+ (format!("__super_trait_ptr{index}"), void_pointer_type_di_node)
}
ty::VtblEntry::MetadataAlign => ("align".to_string(), usize_di_node),
ty::VtblEntry::MetadataSize => ("size".to_string(), usize_di_node),
@@ -1516,5 +1494,5 @@ pub fn tuple_field_name(field_index: usize) -> Cow<'static, str> {
TUPLE_FIELD_NAMES
.get(field_index)
.map(|s| Cow::from(*s))
- .unwrap_or_else(|| Cow::from(format!("__{}", field_index)))
+ .unwrap_or_else(|| Cow::from(format!("__{field_index}")))
}
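Many hunks in this file are mechanical migrations from positional `format!` arguments to Rust 2021 inline format arguments; the two forms produce identical output, for example:

fn main() {
    let field_index = 3usize;
    assert_eq!(format!("__{}", field_index), format!("__{field_index}"));

    // Zero-padded lowercase hex, as used by `hex_encode` above.
    let byte = 0x0Au8;
    assert_eq!(format!("{:02x}", byte), format!("{byte:02x}"));
    assert_eq!(format!("{byte:02x}"), "0a");
}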
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index b2765ffc9..88040557a 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -12,7 +12,7 @@ use rustc_middle::{
ty::{
self,
layout::{LayoutOf, TyAndLayout},
- AdtDef, GeneratorSubsts, Ty,
+ AdtDef, GeneratorArgs, Ty,
},
};
use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
@@ -199,7 +199,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
let enum_type = unique_type_id.expect_ty();
let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
- };
+ };
let enum_type_and_layout = cx.layout_of(enum_type);
let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
@@ -667,19 +667,21 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
generator_type_and_layout: TyAndLayout<'tcx>,
generator_type_di_node: &'ll DIType,
) -> SmallVec<&'ll DIType> {
- let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } = generator_type_and_layout.variants else {
+ let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } =
+ generator_type_and_layout.variants
+ else {
bug!("This function only supports layouts with directly encoded tags.")
};
- let (generator_def_id, generator_substs) = match generator_type_and_layout.ty.kind() {
- &ty::Generator(def_id, substs, _) => (def_id, substs.as_generator()),
+ let (generator_def_id, generator_args) = match generator_type_and_layout.ty.kind() {
+ &ty::Generator(def_id, args, _) => (def_id, args.as_generator()),
_ => unreachable!(),
};
let generator_layout = cx.tcx.optimized_mir(generator_def_id).generator_layout().unwrap();
let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(generator_def_id);
- let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx);
+ let variant_range = generator_args.variant_range(generator_def_id, cx.tcx);
let variant_count = (variant_range.start.as_u32()..variant_range.end.as_u32()).len();
let tag_base_type = tag_base_type(cx, generator_type_and_layout);
@@ -689,11 +691,11 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
generator_type_di_node,
variant_range
.clone()
- .map(|variant_index| (variant_index, GeneratorSubsts::variant_name(variant_index))),
+ .map(|variant_index| (variant_index, GeneratorArgs::variant_name(variant_index))),
);
let discriminants: IndexVec<VariantIdx, DiscrResult> = {
- let discriminants_iter = generator_substs.discriminants(generator_def_id, cx.tcx);
+ let discriminants_iter = generator_args.discriminants(generator_def_id, cx.tcx);
let mut discriminants: IndexVec<VariantIdx, DiscrResult> =
IndexVec::with_capacity(variant_count);
for (variant_index, discr) in discriminants_iter {
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index 8746ce0c5..d3239d5c3 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -10,7 +10,7 @@ use rustc_middle::{
ty::{
self,
layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout},
- AdtDef, GeneratorSubsts, Ty, VariantDef,
+ AdtDef, GeneratorArgs, Ty, VariantDef,
},
};
use rustc_span::Symbol;
@@ -51,7 +51,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
let enum_type = unique_type_id.expect_ty();
let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
- };
+ };
let enum_type_and_layout = cx.layout_of(enum_type);
@@ -325,7 +325,7 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
generator_layout: &GeneratorLayout<'tcx>,
common_upvar_names: &IndexSlice<FieldIdx, Symbol>,
) -> &'ll DIType {
- let variant_name = GeneratorSubsts::variant_name(variant_index);
+ let variant_name = GeneratorArgs::variant_name(variant_index);
let unique_type_id = UniqueTypeId::for_enum_variant_struct_type(
cx.tcx,
generator_type_and_layout.ty,
@@ -334,8 +334,8 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
let variant_layout = generator_type_and_layout.for_variant(cx, variant_index);
- let generator_substs = match generator_type_and_layout.ty.kind() {
- ty::Generator(_, substs, _) => substs.as_generator(),
+ let generator_args = match generator_type_and_layout.ty.kind() {
+ ty::Generator(_, args, _) => args.as_generator(),
_ => unreachable!(),
};
@@ -377,8 +377,9 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
.collect();
// Fields that are common to all states
- let common_fields: SmallVec<_> = generator_substs
+ let common_fields: SmallVec<_> = generator_args
.prefix_tys()
+ .iter()
.zip(common_upvar_names)
.enumerate()
.map(|(index, (upvar_ty, upvar_name))| {
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index 4d1cd6486..feac40d8c 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -57,7 +57,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
let enum_type = unique_type_id.expect_ty();
let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
- };
+ };
let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
let enum_type_and_layout = cx.layout_of(enum_type);
@@ -132,9 +132,9 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
unique_type_id: UniqueTypeId<'tcx>,
) -> DINodeCreationResult<'ll> {
let generator_type = unique_type_id.expect_ty();
- let &ty::Generator(generator_def_id, _, _ ) = generator_type.kind() else {
+ let &ty::Generator(generator_def_id, _, _) = generator_type.kind() else {
bug!("build_generator_di_node() called with non-generator type: `{:?}`", generator_type)
- };
+ };
let containing_scope = get_namespace_for_item(cx, generator_def_id);
let generator_type_and_layout = cx.layout_of(generator_type);
@@ -158,7 +158,9 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
let generator_layout =
cx.tcx.optimized_mir(generator_def_id).generator_layout().unwrap();
- let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } = generator_type_and_layout.variants else {
+ let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } =
+ generator_type_and_layout.variants
+ else {
bug!(
"Encountered generator with non-direct-tag layout: {:?}",
generator_type_and_layout
@@ -173,7 +175,7 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
.indices()
.map(|variant_index| {
// FIXME: This is problematic because just a number is not a valid identifier.
- // GeneratorSubsts::variant_name(variant_index), would be consistent
+ // GeneratorArgs::variant_name(variant_index), would be consistent
// with enums?
let variant_name = format!("{}", variant_index.as_usize()).into();
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index b924c771a..40714a0af 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -27,7 +27,7 @@ use rustc_hir::def_id::{DefId, DefIdMap};
use rustc_index::IndexVec;
use rustc_middle::mir;
use rustc_middle::ty::layout::LayoutOf;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitableExt};
use rustc_session::config::{self, DebugInfo};
use rustc_session::Session;
@@ -338,19 +338,19 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
// Find the enclosing function, in case this is a closure.
let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
- // We look up the generics of the enclosing function and truncate the substs
+ // We look up the generics of the enclosing function and truncate the args
// to their length in order to cut off extra stuff that might be in there for
// closures or generators.
let generics = tcx.generics_of(enclosing_fn_def_id);
- let substs = instance.substs.truncate_to(tcx, generics);
+ let args = instance.args.truncate_to(tcx, generics);
type_names::push_generic_params(
tcx,
- tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substs),
+ tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args),
&mut name,
);
- let template_parameters = get_template_parameters(self, generics, substs);
+ let template_parameters = get_template_parameters(self, generics, args);
let linkage_name = &mangled_name_of_instance(self, instance).name;
// Omit the linkage_name if it is the same as subprogram name.
@@ -471,16 +471,16 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn get_template_parameters<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
generics: &ty::Generics,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> &'ll DIArray {
- if substs.types().next().is_none() {
+ if args.types().next().is_none() {
return create_DIArray(DIB(cx), &[]);
}
// Again, only create type information if full debuginfo is enabled
let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
let names = get_parameter_names(cx, generics);
- iter::zip(substs, names)
+ iter::zip(args, names)
.filter_map(|(kind, name)| {
kind.as_type().map(|ty| {
let actual_type =
@@ -527,7 +527,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
// If the method does *not* belong to a trait, proceed
if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
- instance.substs,
+ instance.args,
ty::ParamEnv::reveal_all(),
cx.tcx.type_of(impl_def_id),
);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index 7be836386..c758010c5 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -91,8 +91,7 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
// For all other pointee types we should already have returned None
// at the beginning of the function.
panic!(
- "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {:?}",
- pointee_tail_ty
+ "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {pointee_tail_ty:?}"
)
}
}
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index 44869ced1..fced6d504 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -81,6 +81,8 @@ pub(crate) struct ErrorCallingDllTool<'a> {
#[derive(Diagnostic)]
#[diag(codegen_llvm_dlltool_fail_import_library)]
pub(crate) struct DlltoolFailImportLibrary<'a> {
+ pub dlltool_path: Cow<'a, str>,
+ pub dlltool_args: String,
pub stdout: Cow<'a, str>,
pub stderr: Cow<'a, str>,
}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index a254c86c2..a9b06030e 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -90,7 +90,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let tcx = self.tcx;
let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
- let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
+ let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
bug!("expected fn item type, found {}", callee_ty);
};
@@ -163,11 +163,10 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
sym::volatile_load | sym::unaligned_volatile_load => {
- let tp_ty = substs.type_at(0);
+ let tp_ty = fn_args.type_at(0);
let ptr = args[0].immediate();
let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
let llty = ty.llvm_type(self);
- let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
self.volatile_load(llty, ptr)
} else {
self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
@@ -230,22 +229,22 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
sym::ctlz | sym::cttz => {
let y = self.const_bool(false);
self.call_intrinsic(
- &format!("llvm.{}.i{}", name, width),
+ &format!("llvm.{name}.i{width}"),
&[args[0].immediate(), y],
)
}
sym::ctlz_nonzero => {
let y = self.const_bool(true);
- let llvm_name = &format!("llvm.ctlz.i{}", width);
+ let llvm_name = &format!("llvm.ctlz.i{width}");
self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
}
sym::cttz_nonzero => {
let y = self.const_bool(true);
- let llvm_name = &format!("llvm.cttz.i{}", width);
+ let llvm_name = &format!("llvm.cttz.i{width}");
self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
}
sym::ctpop => self.call_intrinsic(
- &format!("llvm.ctpop.i{}", width),
+ &format!("llvm.ctpop.i{width}"),
&[args[0].immediate()],
),
sym::bswap => {
@@ -253,13 +252,13 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
args[0].immediate() // byte swap a u8/i8 is just a no-op
} else {
self.call_intrinsic(
- &format!("llvm.bswap.i{}", width),
+ &format!("llvm.bswap.i{width}"),
&[args[0].immediate()],
)
}
}
sym::bitreverse => self.call_intrinsic(
- &format!("llvm.bitreverse.i{}", width),
+ &format!("llvm.bitreverse.i{width}"),
&[args[0].immediate()],
),
sym::rotate_left | sym::rotate_right => {
@@ -298,7 +297,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
sym::raw_eq => {
use abi::Abi::*;
- let tp_ty = substs.type_at(0);
+ let tp_ty = fn_args.type_at(0);
let layout = self.layout_of(tp_ty).layout;
let use_integer_compare = match layout.abi() {
Scalar(_) | ScalarPair(_, _) => true,
@@ -317,18 +316,12 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
self.const_bool(true)
} else if use_integer_compare {
let integer_ty = self.type_ix(layout.size().bits());
- let ptr_ty = self.type_ptr_to(integer_ty);
- let a_ptr = self.bitcast(a, ptr_ty);
- let a_val = self.load(integer_ty, a_ptr, layout.align().abi);
- let b_ptr = self.bitcast(b, ptr_ty);
- let b_val = self.load(integer_ty, b_ptr, layout.align().abi);
+ let a_val = self.load(integer_ty, a, layout.align().abi);
+ let b_val = self.load(integer_ty, b, layout.align().abi);
self.icmp(IntPredicate::IntEQ, a_val, b_val)
} else {
- let i8p_ty = self.type_i8p();
- let a_ptr = self.bitcast(a, i8p_ty);
- let b_ptr = self.bitcast(b, i8p_ty);
let n = self.const_usize(layout.size().bytes());
- let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
+ let cmp = self.call_intrinsic("memcmp", &[a, b, n]);
match self.cx.sess().target.arch.as_ref() {
"avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
_ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
@@ -336,6 +329,16 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
}
+ sym::compare_bytes => {
+ // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+ let cmp = self.call_intrinsic(
+ "memcmp",
+ &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
+ );
+ // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
+ self.sext(cmp, self.type_ix(32))
+ }
+
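The new `compare_bytes` arm lowers the intrinsic to a plain `memcmp` call and sign-extends the result to `i32`. A rough, self-contained illustration of the intended semantics follows; it is not the actual lowering, and a real `memcmp` may return any negative or positive value rather than exactly -1/1:

use std::cmp::Ordering;

fn compare_bytes(a: &[u8], b: &[u8], len: usize) -> i32 {
    for i in 0..len {
        match a[i].cmp(&b[i]) {
            Ordering::Less => return -1,
            Ordering::Greater => return 1,
            Ordering::Equal => {}
        }
    }
    // Equal prefixes (including len == 0) compare as equal, matching the
    // "memcmp is a NOP for size 0" note above.
    0
}

fn main() {
    assert_eq!(compare_bytes(b"abc", b"abc", 3), 0);
    assert!(compare_bytes(b"abc", b"abd", 3) < 0);
    assert_eq!(compare_bytes(b"", b"", 0), 0);
}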
sym::black_box => {
args[0].val.store(self, result);
let result_val_span = [result.llval];
@@ -383,10 +386,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
- let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
- let ptr = self.pointercast(result.llval, ptr_llty);
- self.store(llval, ptr, result.align);
+ if let PassMode::Cast(_, _) = &fn_abi.ret.mode {
+ self.store(llval, result.llval, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
.val
@@ -410,9 +411,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
// Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
// optimization pass replaces calls to this intrinsic with code to test type membership.
- let i8p_ty = self.type_i8p();
- let bitcast = self.bitcast(pointer, i8p_ty);
- self.call_intrinsic("llvm.type.test", &[bitcast, typeid])
+ self.call_intrinsic("llvm.type.test", &[pointer, typeid])
}
fn type_checked_load(
@@ -444,7 +443,7 @@ fn try_intrinsic<'ll>(
dest: &'ll Value,
) {
if bx.sess().panic_strategy() == PanicStrategy::Abort {
- let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
bx.call(try_func_ty, None, None, try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call;
// we can never unwind.
@@ -544,8 +543,8 @@ fn codegen_msvc_try<'ll>(
//
// More information can be found in libstd's seh.rs implementation.
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
- let slot = bx.alloca(bx.type_i8p(), ptr_align);
- let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ let slot = bx.alloca(bx.type_ptr(), ptr_align);
+ let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
bx.switch_to_block(normal);
@@ -568,10 +567,10 @@ fn codegen_msvc_try<'ll>(
//
// When modifying, make sure that the type_name string exactly matches
// the one used in library/panic_unwind/src/seh.rs.
- let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
+ let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
let type_name = bx.const_bytes(b"rust_panic\0");
let type_info =
- bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
+ bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
unsafe {
llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
@@ -588,15 +587,15 @@ fn codegen_msvc_try<'ll>(
bx.switch_to_block(catchpad_rust);
let flags = bx.const_i32(8);
let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
- let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
- let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
+ let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
bx.catch_ret(&funclet, caught);
// The flag value of 64 indicates a "catch-all".
bx.switch_to_block(catchpad_foreign);
let flags = bx.const_i32(64);
- let null = bx.const_null(bx.type_i8p());
+ let null = bx.const_null(bx.type_ptr());
let funclet = bx.catch_pad(cs, &[null, flags, null]);
bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet));
bx.catch_ret(&funclet, caught);
@@ -655,7 +654,7 @@ fn codegen_wasm_try<'ll>(
// ret i32 1
// }
//
- let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
bx.switch_to_block(normal);
@@ -665,13 +664,13 @@ fn codegen_wasm_try<'ll>(
let cs = bx.catch_switch(None, None, &[catchpad]);
bx.switch_to_block(catchpad);
- let null = bx.const_null(bx.type_i8p());
+ let null = bx.const_null(bx.type_ptr());
let funclet = bx.catch_pad(cs, &[null]);
let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
- let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
bx.catch_ret(&funclet, caught);
@@ -723,7 +722,7 @@ fn codegen_gnu_try<'ll>(
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
- let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
bx.switch_to_block(then);
@@ -736,12 +735,12 @@ fn codegen_gnu_try<'ll>(
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
bx.switch_to_block(catch);
- let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+ let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
- let tydesc = bx.const_null(bx.type_i8p());
+ let tydesc = bx.const_null(bx.type_ptr());
bx.add_clause(vals, tydesc);
let ptr = bx.extract_value(vals, 0);
- let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
bx.call(catch_ty, None, None, catch_func, &[data, ptr], None);
bx.ret(bx.const_i32(1));
});
@@ -787,7 +786,7 @@ fn codegen_emcc_try<'ll>(
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
- let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
bx.switch_to_block(then);
@@ -800,10 +799,10 @@ fn codegen_emcc_try<'ll>(
// the landing pad clauses the exception's type had been matched to.
bx.switch_to_block(catch);
let tydesc = bx.eh_catch_typeinfo();
- let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+ let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
bx.add_clause(vals, tydesc);
- bx.add_clause(vals, bx.const_null(bx.type_i8p()));
+ bx.add_clause(vals, bx.const_null(bx.type_ptr()));
let ptr = bx.extract_value(vals, 0);
let selector = bx.extract_value(vals, 1);
@@ -816,7 +815,7 @@ fn codegen_emcc_try<'ll>(
// create an alloca and pass a pointer to that.
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let i8_align = bx.tcx().data_layout.i8_align.abi;
- let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
+ let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
let catch_data = bx.alloca(catch_data_type, ptr_align);
let catch_data_0 =
bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
@@ -824,9 +823,8 @@ fn codegen_emcc_try<'ll>(
let catch_data_1 =
bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
bx.store(is_rust_panic, catch_data_1, i8_align);
- let catch_data = bx.bitcast(catch_data, bx.type_i8p());
- let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None);
bx.ret(bx.const_i32(1));
});
@@ -967,8 +965,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let place = PlaceRef::alloca(bx, args[0].layout);
args[0].val.store(bx, place);
let int_ty = bx.type_ix(expected_bytes * 8);
- let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
- bx.load(int_ty, ptr, Align::ONE)
+ bx.load(int_ty, place.llval, Align::ONE)
}
_ => return_error!(InvalidMonomorphization::InvalidBitmask {
span,
@@ -1033,28 +1030,20 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
));
}
- if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") {
- // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
- // If there is no suffix, use the index array length.
- let n: u64 = if stripped.is_empty() {
- // Make sure this is actually an array, since typeck only checks the length-suffixed
- // version of this intrinsic.
- match args[2].layout.ty.kind() {
- ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
- len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
- || span_bug!(span, "could not evaluate shuffle index array length"),
- )
- }
- _ => return_error!(InvalidMonomorphization::SimdShuffle {
- span,
- name,
- ty: args[2].layout.ty
- }),
+ if name == sym::simd_shuffle {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let n: u64 = match args[2].layout.ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+ len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
+ || span_bug!(span, "could not evaluate shuffle index array length"),
+ )
}
- } else {
- stripped.parse().unwrap_or_else(|_| {
- span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
- })
+ _ => return_error!(InvalidMonomorphization::SimdShuffle {
+ span,
+ name,
+ ty: args[2].layout.ty
+ }),
};
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
@@ -1217,7 +1206,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
bx.store(ze, ptr, Align::ONE);
let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
- let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
return Ok(bx.load(array_ty, ptr, Align::ONE));
}
_ => return_error!(InvalidMonomorphization::CannotReturn {
@@ -1283,7 +1271,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
_ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
};
- let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
+ let llvm_name = &format!("llvm.{intr_name}.v{in_len}{elem_ty_str}");
let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
let c = bx.call(
fn_ty,
@@ -1321,50 +1309,34 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// FIXME: use:
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
- fn llvm_vector_str(
- elem_ty: Ty<'_>,
- vec_len: u64,
- no_pointers: usize,
- bx: &Builder<'_, '_, '_>,
- ) -> String {
- let p0s: String = "p0".repeat(no_pointers);
+ fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String {
match *elem_ty.kind() {
ty::Int(v) => format!(
- "v{}{}i{}",
+ "v{}i{}",
vec_len,
- p0s,
// Normalize to prevent crash if v: IntTy::Isize
v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
),
ty::Uint(v) => format!(
- "v{}{}i{}",
+ "v{}i{}",
vec_len,
- p0s,
// Normalize to prevent crash if v: UIntTy::Usize
v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
),
- ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+ ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
+ ty::RawPtr(_) => format!("v{}p0", vec_len),
_ => unreachable!(),
}
}
- fn llvm_vector_ty<'ll>(
- cx: &CodegenCx<'ll, '_>,
- elem_ty: Ty<'_>,
- vec_len: u64,
- mut no_pointers: usize,
- ) -> &'ll Type {
- // FIXME: use cx.layout_of(ty).llvm_type() ?
- let mut elem_ty = match *elem_ty.kind() {
+ fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
+ let elem_ty = match *elem_ty.kind() {
ty::Int(v) => cx.type_int_from_ty(v),
ty::Uint(v) => cx.type_uint_from_ty(v),
ty::Float(v) => cx.type_float_from_ty(v),
+ ty::RawPtr(_) => cx.type_ptr(),
_ => unreachable!(),
};
- while no_pointers > 0 {
- elem_ty = cx.type_ptr_to(elem_ty);
- no_pointers -= 1;
- }
cx.type_vector(elem_ty, vec_len)
}
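With opaque pointers there is no longer a per-indirection `p0` component, so `llvm_vector_str` now produces suffixes like `v4i32`, `v8f64` or `v4p0`, which are then combined into masked gather/scatter intrinsic names. A self-contained sketch of that naming scheme, with a hypothetical `Elem` enum standing in for the relevant `ty` kinds (derived from the code above, not an official LLVM API):

enum Elem { Int(u32), Float(u32), Ptr }

fn vector_suffix(elem: Elem, vec_len: u64) -> String {
    match elem {
        Elem::Int(bits) => format!("v{vec_len}i{bits}"),
        Elem::Float(bits) => format!("v{vec_len}f{bits}"),
        Elem::Ptr => format!("v{vec_len}p0"),
    }
}

fn main() {
    assert_eq!(vector_suffix(Elem::Float(64), 8), "v8f64");
    let elems = vector_suffix(Elem::Int(32), 4); // "v4i32"
    let ptrs = vector_suffix(Elem::Ptr, 4);      // "v4p0"
    assert_eq!(
        format!("llvm.masked.gather.{elems}.{ptrs}"),
        "llvm.masked.gather.v4i32.v4p0"
    );
}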
@@ -1419,47 +1391,26 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
);
- // This counts how many pointers
- fn ptr_count(t: Ty<'_>) -> usize {
- match t.kind() {
- ty::RawPtr(p) => 1 + ptr_count(p.ty),
- _ => 0,
- }
- }
-
- // Non-ptr type
- fn non_ptr(t: Ty<'_>) -> Ty<'_> {
- match t.kind() {
- ty::RawPtr(p) => non_ptr(p.ty),
- _ => t,
- }
- }
-
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (pointer_count, underlying_ty) = match element_ty1.kind() {
- ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
- _ => {
- require!(
- false,
- InvalidMonomorphization::ExpectedElementType {
- span,
- name,
- expected_element: element_ty1,
- second_arg: arg_tys[1],
- in_elem,
- in_ty,
- mutability: ExpectedPointerMutability::Not,
- }
- );
- unreachable!();
+
+ require!(
+ matches!(
+ element_ty1.kind(),
+ ty::RawPtr(p) if p.ty == in_elem && p.ty.kind() == element_ty0.kind()
+ ),
+ InvalidMonomorphization::ExpectedElementType {
+ span,
+ name,
+ expected_element: element_ty1,
+ second_arg: arg_tys[1],
+ in_elem,
+ in_ty,
+ mutability: ExpectedPointerMutability::Not,
}
- };
- assert!(pointer_count > 0);
- assert_eq!(pointer_count - 1, ptr_count(element_ty0));
- assert_eq!(underlying_ty, non_ptr(element_ty0));
+ );
// The element type of the third argument must be a signed integer type of any width:
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
@@ -1490,15 +1441,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
};
// Type of the vector of pointers:
- let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
- let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+ let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
// Type of the vector of elements:
- let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
- let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+ let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+ let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
let llvm_intrinsic =
- format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+ format!("llvm.masked.gather.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
let fn_ty = bx.type_func(
&[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
llvm_elem_vec_ty,
@@ -1559,50 +1510,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
);
- // This counts how many pointers
- fn ptr_count(t: Ty<'_>) -> usize {
- match t.kind() {
- ty::RawPtr(p) => 1 + ptr_count(p.ty),
- _ => 0,
- }
- }
-
- // Non-ptr type
- fn non_ptr(t: Ty<'_>) -> Ty<'_> {
- match t.kind() {
- ty::RawPtr(p) => non_ptr(p.ty),
- _ => t,
- }
- }
-
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
- let (pointer_count, underlying_ty) = match element_ty1.kind() {
- ty::RawPtr(p) if p.ty == in_elem && p.mutbl.is_mut() => {
- (ptr_count(element_ty1), non_ptr(element_ty1))
- }
- _ => {
- require!(
- false,
- InvalidMonomorphization::ExpectedElementType {
- span,
- name,
- expected_element: element_ty1,
- second_arg: arg_tys[1],
- in_elem,
- in_ty,
- mutability: ExpectedPointerMutability::Mut,
- }
- );
- unreachable!();
+
+ require!(
+ matches!(
+ element_ty1.kind(),
+ ty::RawPtr(p)
+ if p.ty == in_elem && p.mutbl.is_mut() && p.ty.kind() == element_ty0.kind()
+ ),
+ InvalidMonomorphization::ExpectedElementType {
+ span,
+ name,
+ expected_element: element_ty1,
+ second_arg: arg_tys[1],
+ in_elem,
+ in_ty,
+ mutability: ExpectedPointerMutability::Mut,
}
- };
- assert!(pointer_count > 0);
- assert_eq!(pointer_count - 1, ptr_count(element_ty0));
- assert_eq!(underlying_ty, non_ptr(element_ty0));
+ );
// The element type of the third argument must be a signed integer type of any width:
match element_ty2.kind() {
@@ -1634,15 +1563,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let ret_t = bx.type_void();
// Type of the vector of pointers:
- let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
- let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+ let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
// Type of the vector of elements:
- let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
- let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+ let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+ let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
let llvm_intrinsic =
- format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+ format!("llvm.masked.scatter.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
let fn_ty =
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
@@ -1857,11 +1786,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
}
- if in_elem == out_elem {
- return Ok(args[0].immediate());
- } else {
- return Ok(bx.pointercast(args[0].immediate(), llret_ty));
- }
+ return Ok(args[0].immediate());
}
if name == sym::simd_expose_addr {
@@ -2074,6 +1999,52 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
simd_neg: Int => neg, Float => fneg;
}
+ // Unary integer intrinsics
+ if matches!(name, sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctlz | sym::simd_cttz) {
+ let vec_ty = bx.cx.type_vector(
+ match *in_elem.kind() {
+ ty::Int(i) => bx.cx.type_int_from_ty(i),
+ ty::Uint(i) => bx.cx.type_uint_from_ty(i),
+ _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
+ span,
+ name,
+ in_ty,
+ in_elem
+ }),
+ },
+ in_len as u64,
+ );
+ let intrinsic_name = match name {
+ sym::simd_bswap => "bswap",
+ sym::simd_bitreverse => "bitreverse",
+ sym::simd_ctlz => "ctlz",
+ sym::simd_cttz => "cttz",
+ _ => unreachable!(),
+ };
+ let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
+ let llvm_intrinsic = &format!("llvm.{}.v{}i{}", intrinsic_name, in_len, int_size,);
+
+ return if name == sym::simd_bswap && int_size == 8 {
+ // byte swap is no-op for i8/u8
+ Ok(args[0].immediate())
+ } else if matches!(name, sym::simd_ctlz | sym::simd_cttz) {
+ let fn_ty = bx.type_func(&[vec_ty, bx.type_i1()], vec_ty);
+ let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+ Ok(bx.call(
+ fn_ty,
+ None,
+ None,
+ f,
+ &[args[0].immediate(), bx.const_int(bx.type_i1(), 0)],
+ None,
+ ))
+ } else {
+ let fn_ty = bx.type_func(&[vec_ty], vec_ty);
+ let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+ Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None))
+ };
+ }
+
if name == sym::simd_arith_offset {
// This also checks that the first operand is a ptr type.
let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 24ba28bbc..d283299ac 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -28,7 +28,7 @@ pub use llvm_util::target_features;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
- CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
+ CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::ModuleCodegen;
@@ -40,12 +40,13 @@ use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::query::Providers;
use rustc_middle::ty::TyCtxt;
-use rustc_session::config::{OptLevel, OutputFilenames, PrintRequest};
+use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
use rustc_session::Session;
use rustc_span::symbol::Symbol;
use std::any::Any;
use std::ffi::CStr;
+use std::io::Write;
mod back {
pub mod archive;
@@ -140,18 +141,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
back::write::target_machine_factory(sess, optlvl, target_features)
}
- fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T>
- where
- F: FnOnce() -> T,
- F: Send + 'static,
- T: Send + 'static,
- {
- std::thread::spawn(move || {
- let _profiler = TimeTraceProfiler::new(time_trace);
- f()
- })
- }
-
fn spawn_named_thread<F, T>(
time_trace: bool,
name: String,
@@ -178,7 +167,28 @@ impl WriteBackendMethods for LlvmCodegenBackend {
type ThinBuffer = back::lto::ThinBuffer;
fn print_pass_timings(&self) {
unsafe {
- llvm::LLVMRustPrintPassTimings();
+ let mut size = 0;
+ let cstr = llvm::LLVMRustPrintPassTimings(&mut size as *mut usize);
+ if cstr.is_null() {
+ println!("failed to get pass timings");
+ } else {
+ let timings = std::slice::from_raw_parts(cstr as *const u8, size);
+ std::io::stdout().write_all(timings).unwrap();
+ libc::free(cstr as *mut _);
+ }
+ }
+ }
+ fn print_statistics(&self) {
+ unsafe {
+ let mut size = 0;
+ let cstr = llvm::LLVMRustPrintStatistics(&mut size as *mut usize);
+ if cstr.is_null() {
+ println!("failed to get pass stats");
+ } else {
+ let stats = std::slice::from_raw_parts(cstr as *const u8, size);
+ std::io::stdout().write_all(stats).unwrap();
+ libc::free(cstr as *mut _);
+ }
}
}
fn run_link(
@@ -190,7 +200,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn run_fat_lto(
cgcx: &CodegenContext<Self>,
- modules: Vec<FatLTOInput<Self>>,
+ modules: Vec<FatLtoInput<Self>>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<Self>, FatalError> {
back::lto::run_fat(cgcx, modules, cached_modules)
@@ -262,10 +272,10 @@ impl CodegenBackend for LlvmCodegenBackend {
|tcx, ()| llvm_util::global_llvm_features(tcx.sess, true)
}
- fn print(&self, req: PrintRequest, sess: &Session) {
- match req {
- PrintRequest::RelocationModels => {
- println!("Available relocation models:");
+ fn print(&self, req: &PrintRequest, out: &mut dyn PrintBackendInfo, sess: &Session) {
+ match req.kind {
+ PrintKind::RelocationModels => {
+ writeln!(out, "Available relocation models:");
for name in &[
"static",
"pic",
@@ -276,26 +286,27 @@ impl CodegenBackend for LlvmCodegenBackend {
"ropi-rwpi",
"default",
] {
- println!(" {}", name);
+ writeln!(out, " {name}");
}
- println!();
+ writeln!(out);
}
- PrintRequest::CodeModels => {
- println!("Available code models:");
+ PrintKind::CodeModels => {
+ writeln!(out, "Available code models:");
for name in &["tiny", "small", "kernel", "medium", "large"] {
- println!(" {}", name);
+ writeln!(out, " {name}");
}
- println!();
+ writeln!(out);
}
- PrintRequest::TlsModels => {
- println!("Available TLS models:");
+ PrintKind::TlsModels => {
+ writeln!(out, "Available TLS models:");
for name in &["global-dynamic", "local-dynamic", "initial-exec", "local-exec"] {
- println!(" {}", name);
+ writeln!(out, " {name}");
}
- println!();
+ writeln!(out);
}
- PrintRequest::StackProtectorStrategies => {
- println!(
+ PrintKind::StackProtectorStrategies => {
+ writeln!(
+ out,
r#"Available stack protector strategies:
all
Generate stack canaries in all functions.
@@ -319,7 +330,7 @@ impl CodegenBackend for LlvmCodegenBackend {
"#
);
}
- req => llvm_util::print(req, sess),
+ _other => llvm_util::print(req, out, sess),
}
}
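The `print` refactor above stops writing directly to stdout with `println!` and instead writes into a caller-supplied sink, so the driver can redirect `--print` output. A minimal sketch of the pattern, with `std::fmt::Write` standing in for the `PrintBackendInfo` trait (an assumption; the real trait lives in rustc_codegen_ssa):

use std::fmt::Write;

fn print_code_models(out: &mut dyn Write) {
    writeln!(out, "Available code models:").unwrap();
    for name in ["tiny", "small", "kernel", "medium", "large"] {
        writeln!(out, "    {name}").unwrap();
    }
}

fn main() {
    // The caller decides where the text goes: a String here, stdout or a file elsewhere.
    let mut buf = String::new();
    print_code_models(&mut buf);
    print!("{buf}");
}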
diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
index 45de284d2..06e846a2b 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
@@ -9,7 +9,7 @@ use libc::c_uint;
use super::{DiagnosticInfo, SMDiagnostic};
use rustc_span::InnerSpan;
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
pub enum OptimizationDiagnosticKind {
OptimizationRemark,
OptimizationMissed,
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 3ad546b61..84157d1e2 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1,8 +1,6 @@
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
-use crate::coverageinfo::map_data as coverage_map;
-
use super::debuginfo::{
DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace,
@@ -477,6 +475,8 @@ pub enum OptStage {
pub struct SanitizerOptions {
pub sanitize_address: bool,
pub sanitize_address_recover: bool,
+ pub sanitize_cfi: bool,
+ pub sanitize_kcfi: bool,
pub sanitize_memory: bool,
pub sanitize_memory_recover: bool,
pub sanitize_memory_track_origins: c_int,
@@ -688,204 +688,6 @@ extern "C" {
pub type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void);
pub type InlineAsmDiagHandlerTy = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint);
-pub mod coverageinfo {
- use super::coverage_map;
-
- /// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`.
- ///
- /// Must match the layout of `LLVMRustCounterMappingRegionKind`.
- #[derive(Copy, Clone, Debug)]
- #[repr(C)]
- pub enum RegionKind {
- /// A CodeRegion associates some code with a counter
- CodeRegion = 0,
-
- /// An ExpansionRegion represents a file expansion region that associates
- /// a source range with the expansion of a virtual source file, such as
- /// for a macro instantiation or #include file.
- ExpansionRegion = 1,
-
- /// A SkippedRegion represents a source range with code that was skipped
- /// by a preprocessor or similar means.
- SkippedRegion = 2,
-
- /// A GapRegion is like a CodeRegion, but its count is only set as the
- /// line execution count when its the only region in the line.
- GapRegion = 3,
-
- /// A BranchRegion represents leaf-level boolean expressions and is
- /// associated with two counters, each representing the number of times the
- /// expression evaluates to true or false.
- BranchRegion = 4,
- }
-
- /// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
- /// coverage map, in accordance with the
- /// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
- /// The struct composes fields representing the `Counter` type and value(s) (injected counter
- /// ID, or expression type and operands), the source file (an indirect index into a "filenames
- /// array", encoded separately), and source location (start and end positions of the represented
- /// code region).
- ///
- /// Corresponds to struct `llvm::coverage::CounterMappingRegion`.
- ///
- /// Must match the layout of `LLVMRustCounterMappingRegion`.
- #[derive(Copy, Clone, Debug)]
- #[repr(C)]
- pub struct CounterMappingRegion {
- /// The counter type and type-dependent counter data, if any.
- counter: coverage_map::Counter,
-
- /// If the `RegionKind` is a `BranchRegion`, this represents the counter
- /// for the false branch of the region.
- false_counter: coverage_map::Counter,
-
- /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
- /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
- /// that, in turn, are used to look up the filename for this region.
- file_id: u32,
-
- /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
- /// the mapping regions created as a result of macro expansion, by checking if their file id
- /// matches the expanded file id.
- expanded_file_id: u32,
-
- /// 1-based starting line of the mapping region.
- start_line: u32,
-
- /// 1-based starting column of the mapping region.
- start_col: u32,
-
- /// 1-based ending line of the mapping region.
- end_line: u32,
-
- /// 1-based ending column of the mapping region. If the high bit is set, the current
- /// mapping region is a gap area.
- end_col: u32,
-
- kind: RegionKind,
- }
-
- impl CounterMappingRegion {
- pub(crate) fn code_region(
- counter: coverage_map::Counter,
- file_id: u32,
- start_line: u32,
- start_col: u32,
- end_line: u32,
- end_col: u32,
- ) -> Self {
- Self {
- counter,
- false_counter: coverage_map::Counter::zero(),
- file_id,
- expanded_file_id: 0,
- start_line,
- start_col,
- end_line,
- end_col,
- kind: RegionKind::CodeRegion,
- }
- }
-
- // This function might be used in the future; the LLVM API is still evolving, as is coverage
- // support.
- #[allow(dead_code)]
- pub(crate) fn branch_region(
- counter: coverage_map::Counter,
- false_counter: coverage_map::Counter,
- file_id: u32,
- start_line: u32,
- start_col: u32,
- end_line: u32,
- end_col: u32,
- ) -> Self {
- Self {
- counter,
- false_counter,
- file_id,
- expanded_file_id: 0,
- start_line,
- start_col,
- end_line,
- end_col,
- kind: RegionKind::BranchRegion,
- }
- }
-
- // This function might be used in the future; the LLVM API is still evolving, as is coverage
- // support.
- #[allow(dead_code)]
- pub(crate) fn expansion_region(
- file_id: u32,
- expanded_file_id: u32,
- start_line: u32,
- start_col: u32,
- end_line: u32,
- end_col: u32,
- ) -> Self {
- Self {
- counter: coverage_map::Counter::zero(),
- false_counter: coverage_map::Counter::zero(),
- file_id,
- expanded_file_id,
- start_line,
- start_col,
- end_line,
- end_col,
- kind: RegionKind::ExpansionRegion,
- }
- }
-
- // This function might be used in the future; the LLVM API is still evolving, as is coverage
- // support.
- #[allow(dead_code)]
- pub(crate) fn skipped_region(
- file_id: u32,
- start_line: u32,
- start_col: u32,
- end_line: u32,
- end_col: u32,
- ) -> Self {
- Self {
- counter: coverage_map::Counter::zero(),
- false_counter: coverage_map::Counter::zero(),
- file_id,
- expanded_file_id: 0,
- start_line,
- start_col,
- end_line,
- end_col,
- kind: RegionKind::SkippedRegion,
- }
- }
-
- // This function might be used in the future; the LLVM API is still evolving, as is coverage
- // support.
- #[allow(dead_code)]
- pub(crate) fn gap_region(
- counter: coverage_map::Counter,
- file_id: u32,
- start_line: u32,
- start_col: u32,
- end_line: u32,
- end_col: u32,
- ) -> Self {
- Self {
- counter,
- false_counter: coverage_map::Counter::zero(),
- file_id,
- expanded_file_id: 0,
- start_line,
- start_col,
- end_line,
- end_col: (1_u32 << 31) | end_col,
- kind: RegionKind::GapRegion,
- }
- }
- }
-}
-
pub mod debuginfo {
use super::{InvariantOpaque, Metadata};
use bitflags::bitflags;
@@ -1073,7 +875,7 @@ extern "C" {
// Operations on array, pointer, and vector types (sequence types)
pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
- pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type;
+ pub fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type;
pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
pub fn LLVMGetElementType(Ty: &Type) -> &Type;
@@ -1094,6 +896,7 @@ extern "C" {
pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata;
pub fn LLVMIsAFunction(Val: &Value) -> Option<&Value>;
+ pub fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool;
// Operations on constants of any type
pub fn LLVMConstNull(Ty: &Type) -> &Value;
@@ -1155,7 +958,7 @@ extern "C" {
pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
// Constant expressions
- pub fn LLVMRustConstInBoundsGEP2<'a>(
+ pub fn LLVMConstInBoundsGEP2<'a>(
ty: &'a Type,
ConstantVal: &'a Value,
ConstantIndices: *const &'a Value,
@@ -1868,7 +1671,10 @@ extern "C" {
pub fn LLVMRustGetLastError() -> *const c_char;
/// Print the pass timings since static dtors aren't picking them up.
- pub fn LLVMRustPrintPassTimings();
+ pub fn LLVMRustPrintPassTimings(size: *const size_t) -> *const c_char;
+
+ /// Print the statistics since static dtors aren't picking them up.
+ pub fn LLVMRustPrintStatistics(size: *const size_t) -> *const c_char;
pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type;
@@ -1901,6 +1707,8 @@ extern "C" {
pub fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
Filenames: *const *const c_char,
FilenamesLen: size_t,
+ Lengths: *const size_t,
+ LengthsLen: size_t,
BufferOut: &RustString,
);
@@ -1908,15 +1716,18 @@ extern "C" {
pub fn LLVMRustCoverageWriteMappingToBuffer(
VirtualFileMappingIDs: *const c_uint,
NumVirtualFileMappingIDs: c_uint,
- Expressions: *const coverage_map::CounterExpression,
+ Expressions: *const crate::coverageinfo::ffi::CounterExpression,
NumExpressions: c_uint,
- MappingRegions: *const coverageinfo::CounterMappingRegion,
+ MappingRegions: *const crate::coverageinfo::ffi::CounterMappingRegion,
NumMappingRegions: c_uint,
BufferOut: &RustString,
);
- pub fn LLVMRustCoverageCreatePGOFuncNameVar(F: &Value, FuncName: *const c_char) -> &Value;
- pub fn LLVMRustCoverageHashCString(StrVal: *const c_char) -> u64;
+ pub fn LLVMRustCoverageCreatePGOFuncNameVar(
+ F: &Value,
+ FuncName: *const c_char,
+ FuncNameLen: size_t,
+ ) -> &Value;
pub fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64;
#[allow(improper_ctypes)]
@@ -2281,7 +2092,12 @@ extern "C" {
pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
- pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine, cpu: *const c_char);
+ pub fn LLVMRustPrintTargetCPUs(
+ T: &TargetMachine,
+ cpu: *const c_char,
+ print: unsafe extern "C" fn(out: *mut c_void, string: *const c_char, len: usize),
+ out: *mut c_void,
+ );
pub fn LLVMRustGetTargetFeaturesCount(T: &TargetMachine) -> size_t;
pub fn LLVMRustGetTargetFeature(
T: &TargetMachine,
@@ -2331,6 +2147,7 @@ extern "C" {
TM: &'a TargetMachine,
OptLevel: PassBuilderOptLevel,
OptStage: OptStage,
+ IsLinkerPluginLTO: bool,
NoPrepopulatePasses: bool,
VerifyIR: bool,
UseThinLTOBuffers: bool,
@@ -2525,6 +2342,7 @@ extern "C" {
remark_passes: *const *const c_char,
remark_passes_len: usize,
remark_file: *const c_char,
+ pgo_available: bool,
);
#[allow(improper_ctypes)]
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 03be0654b..a76c9c9b7 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -8,16 +8,17 @@ use libc::c_int;
use rustc_codegen_ssa::target_features::{
supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES,
};
+use rustc_codegen_ssa::traits::PrintBackendInfo;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_fs_util::path_to_c_string;
use rustc_middle::bug;
-use rustc_session::config::PrintRequest;
+use rustc_session::config::{PrintKind, PrintRequest};
use rustc_session::Session;
use rustc_span::symbol::Symbol;
use rustc_target::spec::{MergeFunctions, PanicStrategy};
-use std::ffi::{CStr, CString};
+use std::ffi::{c_char, c_void, CStr, CString};
use std::path::Path;
use std::ptr;
use std::slice;
@@ -110,6 +111,10 @@ unsafe fn configure_llvm(sess: &Session) {
// Use non-zero `import-instr-limit` multiplier for cold callsites.
add("-import-cold-multiplier=0.1", false);
+ if sess.print_llvm_stats() {
+ add("-stats", false);
+ }
+
for arg in sess_args {
add(&(*arg), true);
}
@@ -310,7 +315,7 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
pub fn print_version() {
let (major, minor, patch) = get_version();
- println!("LLVM version: {}.{}.{}", major, minor, patch);
+ println!("LLVM version: {major}.{minor}.{patch}");
}
pub fn get_version() -> (u32, u32, u32) {
@@ -350,7 +355,7 @@ fn llvm_target_features(tm: &llvm::TargetMachine) -> Vec<(&str, &str)> {
ret
}
-fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) {
+fn print_target_features(out: &mut dyn PrintBackendInfo, sess: &Session, tm: &llvm::TargetMachine) {
let mut llvm_target_features = llvm_target_features(tm);
let mut known_llvm_target_features = FxHashSet::<&'static str>::default();
let mut rustc_target_features = supported_target_features(sess)
@@ -383,36 +388,48 @@ fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) {
.max()
.unwrap_or(0);
- println!("Features supported by rustc for this target:");
+ writeln!(out, "Features supported by rustc for this target:");
for (feature, desc) in &rustc_target_features {
- println!(" {1:0$} - {2}.", max_feature_len, feature, desc);
+ writeln!(out, " {feature:max_feature_len$} - {desc}.");
}
- println!("\nCode-generation features supported by LLVM for this target:");
+ writeln!(out, "\nCode-generation features supported by LLVM for this target:");
for (feature, desc) in &llvm_target_features {
- println!(" {1:0$} - {2}.", max_feature_len, feature, desc);
+ writeln!(out, " {feature:max_feature_len$} - {desc}.");
}
if llvm_target_features.is_empty() {
- println!(" Target features listing is not supported by this LLVM version.");
+ writeln!(out, " Target features listing is not supported by this LLVM version.");
}
- println!("\nUse +feature to enable a feature, or -feature to disable it.");
- println!("For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
- println!("Code-generation features cannot be used in cfg or #[target_feature],");
- println!("and may be renamed or removed in a future version of LLVM or rustc.\n");
+ writeln!(out, "\nUse +feature to enable a feature, or -feature to disable it.");
+ writeln!(out, "For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
+ writeln!(out, "Code-generation features cannot be used in cfg or #[target_feature],");
+ writeln!(out, "and may be renamed or removed in a future version of LLVM or rustc.\n");
}
-pub(crate) fn print(req: PrintRequest, sess: &Session) {
+pub(crate) fn print(req: &PrintRequest, mut out: &mut dyn PrintBackendInfo, sess: &Session) {
require_inited();
let tm = create_informational_target_machine(sess);
- match req {
- PrintRequest::TargetCPUs => {
+ match req.kind {
+ PrintKind::TargetCPUs => {
// SAFETY: generate a C-compatible string from a byte slice to pass
// the target CPU name into LLVM; the reference lives at least as long
// as the C function call
let cpu_cstring = CString::new(handle_native(sess.target.cpu.as_ref()))
.unwrap_or_else(|e| bug!("failed to convert to cstring: {}", e));
- unsafe { llvm::LLVMRustPrintTargetCPUs(tm, cpu_cstring.as_ptr()) };
+ unsafe extern "C" fn callback(out: *mut c_void, string: *const c_char, len: usize) {
+ let out = &mut *(out as *mut &mut dyn PrintBackendInfo);
+ let bytes = slice::from_raw_parts(string as *const u8, len);
+ write!(out, "{}", String::from_utf8_lossy(bytes));
+ }
+ unsafe {
+ llvm::LLVMRustPrintTargetCPUs(
+ tm,
+ cpu_cstring.as_ptr(),
+ callback,
+ &mut out as *mut &mut dyn PrintBackendInfo as *mut c_void,
+ );
+ }
}
- PrintRequest::TargetFeatures => print_target_features(sess, tm),
+ PrintKind::TargetFeatures => print_target_features(out, sess, tm),
_ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
}
}
@@ -490,8 +507,6 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
.features
.split(',')
.filter(|v| !v.is_empty() && backend_feature_name(v).is_some())
- // Drop +atomics-32 feature introduced in LLVM 15.
- .filter(|v| *v != "+atomics-32" || get_version() >= (15, 0, 0))
.map(String::from),
);
@@ -558,7 +573,7 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
match (enable_disable, feat) {
('-' | '+', TargetFeatureFoldStrength::Both(f))
| ('+', TargetFeatureFoldStrength::EnableOnly(f)) => {
- Some(format!("{}{}", enable_disable, f))
+ Some(format!("{enable_disable}{f}"))
}
_ => None,
}
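Editor's note: the llvm_util.rs hunk above routes `--print target-cpus` output through a C callback plus an opaque `*mut c_void` that smuggles a Rust trait object across the FFI boundary, instead of printing directly from C++. A minimal sketch of that pattern, with `std::fmt::Write` standing in for the `PrintBackendInfo` trait and a plain Rust function (`c_side`, hypothetical) standing in for `LLVMRustPrintTargetCPUs`:

use std::ffi::c_void;
use std::fmt::Write;

// Stand-in for the C++ side: it only ever sees an opaque pointer and a callback.
fn c_side(out: *mut c_void, print: unsafe extern "C" fn(*mut c_void, *const u8, usize)) {
    let msg = b"skylake\n";
    unsafe { print(out, msg.as_ptr(), msg.len()) };
}

unsafe extern "C" fn callback(out: *mut c_void, string: *const u8, len: usize) {
    // Recover the `&mut dyn Write` trait object smuggled through the void pointer.
    let out = &mut *(out as *mut &mut dyn Write);
    let bytes = std::slice::from_raw_parts(string, len);
    let _ = out.write_str(&String::from_utf8_lossy(bytes));
}

fn main() {
    let mut buf = String::new();
    let mut sink: &mut dyn Write = &mut buf;
    // `*mut &mut dyn Write` is a thin pointer, so it fits in a single void*.
    c_side(&mut sink as *mut &mut dyn Write as *mut c_void, callback);
    assert_eq!(buf, "skylake\n");
}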
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index c24854b27..38e822056 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -48,7 +48,7 @@ impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> {
visibility: Visibility,
symbol_name: &str,
) {
- assert!(!instance.substs.has_infer());
+ assert!(!instance.args.has_infer());
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
let lldecl = self.declare_fn(symbol_name, fn_abi, Some(instance));
@@ -111,7 +111,7 @@ impl CodegenCx<'_, '_> {
}
// Symbols from executables can't really be imported any further.
- let all_exe = self.tcx.sess.crate_types().iter().all(|ty| *ty == CrateType::Executable);
+ let all_exe = self.tcx.crate_types().iter().all(|ty| *ty == CrateType::Executable);
let is_declaration_for_linker =
is_declaration || linkage == llvm::Linkage::AvailableExternallyLinkage;
if all_exe && !is_declaration_for_linker {
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 7e672a8dc..8db6195d9 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -112,12 +112,6 @@ impl<'ll> CodegenCx<'ll, '_> {
}
}
- pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
- // FIXME(eddyb) We could find a better approximation if ity.align < align.
- let ity = Integer::approximate_align(self, align);
- self.type_from_integer(ity)
- }
-
/// Return a LLVM type that has at most the required alignment,
/// and exactly the required size, as a best-effort padding array.
pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
@@ -189,17 +183,12 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
}
- fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
- assert_ne!(
- self.type_kind(ty),
- TypeKind::Function,
- "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
- );
- ty.ptr_to(AddressSpace::DATA)
+ fn type_ptr(&self) -> &'ll Type {
+ self.type_ptr_ext(AddressSpace::DATA)
}
- fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
- ty.ptr_to(address_space)
+ fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type {
+ unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) }
}
fn element_type(&self, ty: &'ll Type) -> &'ll Type {
@@ -247,12 +236,8 @@ impl Type {
unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) }
}
- pub fn i8p_llcx(llcx: &llvm::Context) -> &Type {
- Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA)
- }
-
- fn ptr_to(&self, address_space: AddressSpace) -> &Type {
- unsafe { llvm::LLVMPointerType(self, address_space.0) }
+ pub fn ptr_llcx(llcx: &llvm::Context) -> &Type {
+ unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) }
}
}
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 7f7da8483..831645579 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(
match layout.abi {
Abi::Scalar(_) => bug!("handled elsewhere"),
Abi::Vector { element, count } => {
- let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
+ let element = layout.scalar_llvm_type_at(cx, element);
return cx.type_vector(element, count);
}
Abi::ScalarPair(..) => {
@@ -57,13 +57,10 @@ fn uncached_llvm_type<'a, 'tcx>(
if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
(layout.ty.kind(), &layout.variants)
{
- write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+ write!(&mut name, "::{}", ty::GeneratorArgs::variant_name(index)).unwrap();
}
Some(name)
}
- // Use identified structure types for ADT. Due to pointee types in LLVM IR their definition
- // might be recursive. Other cases are non-recursive and we can use literal structure types.
- ty::Adt(..) => Some(String::new()),
_ => None,
};
@@ -179,12 +176,7 @@ pub trait LayoutLlvmExt<'tcx> {
fn is_llvm_scalar_pair(&self) -> bool;
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
- fn scalar_llvm_type_at<'a>(
- &self,
- cx: &CodegenCx<'a, 'tcx>,
- scalar: Scalar,
- offset: Size,
- ) -> &'a Type;
+ fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type;
fn scalar_pair_element_llvm_type<'a>(
&self,
cx: &CodegenCx<'a, 'tcx>,
@@ -230,16 +222,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
return llty;
}
let llty = match *self.ty.kind() {
- ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
- cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
- }
- ty::Adt(def, _) if def.is_box() => {
- cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
- }
+ ty::Ref(..) | ty::RawPtr(_) => cx.type_ptr(),
+ ty::Adt(def, _) if def.is_box() => cx.type_ptr(),
ty::FnPtr(sig) => {
cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
}
- _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
+ _ => self.scalar_llvm_type_at(cx, scalar),
};
cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
return llty;
@@ -300,25 +288,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
self.llvm_type(cx)
}
- fn scalar_llvm_type_at<'a>(
- &self,
- cx: &CodegenCx<'a, 'tcx>,
- scalar: Scalar,
- offset: Size,
- ) -> &'a Type {
+ fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type {
match scalar.primitive() {
Int(i, _) => cx.type_from_integer(i),
F32 => cx.type_f32(),
F64 => cx.type_f64(),
- Pointer(address_space) => {
- // If we know the alignment, pick something better than i8.
- let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
- cx.type_pointee_for_align(pointee.align)
- } else {
- cx.type_i8()
- };
- cx.type_ptr_to_ext(pointee, address_space)
- }
+ Pointer(address_space) => cx.type_ptr_ext(address_space),
}
}
@@ -336,7 +311,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
}
// only wide pointer boxes are handled as pointers
// thin pointer boxes with scalar allocators are handled by the general logic below
- ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+ ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty());
return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
}
@@ -364,8 +339,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
return cx.type_i1();
}
- let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
- self.scalar_llvm_type_at(cx, scalar, offset)
+ self.scalar_llvm_type_at(cx, scalar)
}
fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 8800caa71..172c66a7a 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -5,7 +5,7 @@ use crate::value::Value;
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::{
common::IntPredicate,
- traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
+ traits::{BaseTypeMethods, BuilderMethods, ConstMethods},
};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::Ty;
@@ -26,24 +26,18 @@ fn round_pointer_up_to_alignment<'ll>(
fn emit_direct_ptr_va_arg<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
list: OperandRef<'tcx, &'ll Value>,
- llty: &'ll Type,
size: Size,
align: Align,
slot_size: Align,
allow_higher_align: bool,
) -> (&'ll Value, Align) {
- let va_list_ty = bx.type_i8p();
- let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
- let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
- bx.bitcast(list.immediate(), va_list_ptr_ty)
- } else {
- list.immediate()
- };
+ let va_list_ty = bx.type_ptr();
+ let va_list_addr = list.immediate();
let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
let (addr, addr_align) = if allow_higher_align && align > slot_size {
- (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
+ (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
} else {
(ptr, slot_size)
};
@@ -56,9 +50,9 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
- (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
+ (adjusted, addr_align)
} else {
- (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
+ (addr, addr_align)
}
}
@@ -81,7 +75,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
(layout.llvm_type(bx.cx), layout.size, layout.align)
};
let (addr, addr_align) =
- emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
+ emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align);
if indirect {
let tmp_ret = bx.load(llty, addr, addr_align);
bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
@@ -146,7 +140,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
bx.cond_br(use_stack, on_stack, in_reg);
bx.switch_to_block(in_reg);
- let top_type = bx.type_i8p();
+ let top_type = bx.type_ptr();
let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index);
let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
@@ -158,7 +152,6 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
}
let reg_type = layout.llvm_type(bx);
- let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
bx.br(end);
@@ -218,7 +211,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
// Work out the address of the value in the register save area.
let reg_ptr =
bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
- let reg_ptr_v = bx.load(bx.type_i8p(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
+ let reg_ptr_v = bx.load(bx.type_ptr(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]);
@@ -234,7 +227,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
// Work out the address of the value in the argument overflow area.
let arg_ptr =
bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2));
- let arg_ptr_v = bx.load(bx.type_i8p(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+ let arg_ptr_v = bx.load(bx.type_ptr(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
let arg_off = bx.const_u64(padding);
let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]);
@@ -246,14 +239,12 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
// Return the appropriate result.
bx.switch_to_block(end);
- let val_addr = bx.phi(bx.type_i8p(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+ let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
let val_type = layout.llvm_type(bx);
let val_addr = if indirect {
- let ptr_type = bx.cx.type_ptr_to(val_type);
- let ptr_addr = bx.bitcast(val_addr, bx.cx.type_ptr_to(ptr_type));
- bx.load(ptr_type, ptr_addr, bx.tcx().data_layout.pointer_align.abi)
+ bx.load(bx.cx.type_ptr(), val_addr, bx.tcx().data_layout.pointer_align.abi)
} else {
- bx.bitcast(val_addr, bx.cx.type_ptr_to(val_type))
+ val_addr
};
bx.load(val_type, val_addr, layout.align.abi)
}
diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml
index 984efa210..34d0e2d1d 100644
--- a/compiler/rustc_codegen_ssa/Cargo.toml
+++ b/compiler/rustc_codegen_ssa/Cargo.toml
@@ -3,21 +3,17 @@ name = "rustc_codegen_ssa"
version = "0.0.0"
edition = "2021"
-[lib]
-test = false
-
[dependencies]
-ar_archive_writer = "0.1.3"
+ar_archive_writer = "0.1.5"
bitflags = "1.2.1"
cc = "1.0.69"
itertools = "0.10.1"
tracing = "0.1"
jobserver = "0.1.22"
tempfile = "3.2"
-thorin-dwp = "0.6"
+thorin-dwp = "0.7"
pathdiff = "0.2.0"
serde_json = "1.0.59"
-snap = "1"
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
regex = "1.4"
@@ -46,7 +42,7 @@ rustc_session = { path = "../rustc_session" }
libc = "0.2.50"
[dependencies.object]
-version = "0.31.1"
+version = "0.32.0"
default-features = false
features = ["read_core", "elf", "macho", "pe", "xcoff", "unaligned", "archive", "write"]
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index f73080182..b6c70c622 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -197,6 +197,8 @@ codegen_ssa_specify_libraries_to_link = use the `-l` flag to specify native libr
codegen_ssa_static_library_native_artifacts = Link against the following native artifacts when linking against this static library. The order and any duplication can be significant on some platforms.
+codegen_ssa_static_library_native_artifacts_to_file = Native artifacts to link against have been written to {$path}. The order and any duplication can be significant on some platforms.
+
codegen_ssa_stripping_debug_info_failed = stripping debug info with `{$util}` failed: {$status}
.note = {$output}
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index b603a8787..a7ac728c5 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -12,8 +12,8 @@ use rustc_metadata::fs::{copy_to_stdout, emit_wrapper_file, METADATA_FILENAME};
use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile;
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::SymbolExportKind;
-use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, Strip};
-use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SplitDwarfKind};
+use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, OutFileName, Strip};
+use rustc_session::config::{OutputFilenames, OutputType, PrintKind, SplitDwarfKind};
use rustc_session::cstore::DllImport;
use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename};
use rustc_session::search_paths::PathKind;
@@ -69,7 +69,7 @@ pub fn link_binary<'a>(
let _timer = sess.timer("link_binary");
let output_metadata = sess.opts.output_types.contains_key(&OutputType::Metadata);
let mut tempfiles_for_stdout_output: Vec<PathBuf> = Vec::new();
- for &crate_type in sess.crate_types().iter() {
+ for &crate_type in &codegen_results.crate_info.crate_types {
// Ignore executable crates if we have -Z no-codegen, as they will error.
if (sess.opts.unstable_opts.no_codegen || !sess.opts.output_types.should_codegen())
&& !output_metadata
@@ -596,8 +596,10 @@ fn link_staticlib<'a>(
all_native_libs.extend_from_slice(&codegen_results.crate_info.used_libraries);
- if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) {
- print_native_static_libs(sess, &all_native_libs, &all_rust_dylibs);
+ for print in &sess.opts.prints {
+ if print.kind == PrintKind::NativeStaticLibs {
+ print_native_static_libs(sess, &print.out, &all_native_libs, &all_rust_dylibs);
+ }
}
Ok(())
@@ -744,8 +746,11 @@ fn link_natively<'a>(
cmd.env_remove(k.as_ref());
}
- if sess.opts.prints.contains(&PrintRequest::LinkArgs) {
- println!("{:?}", &cmd);
+ for print in &sess.opts.prints {
+ if print.kind == PrintKind::LinkArgs {
+ let content = format!("{cmd:?}");
+ print.out.overwrite(&content, sess);
+ }
}
// May have not found libraries in the right formats.
@@ -1231,22 +1236,21 @@ fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) {
}
}
- let channel = option_env!("CFG_RELEASE_CHANNEL")
- .map(|channel| format!("-{}", channel))
- .unwrap_or_default();
+ let channel =
+ option_env!("CFG_RELEASE_CHANNEL").map(|channel| format!("-{channel}")).unwrap_or_default();
if sess.target.is_like_osx {
// On Apple platforms, the sanitizer is always built as a dylib, and
// LLVM will link to `@rpath/*.dylib`, so we need to specify an
// rpath to the library as well (the rpath should be absolute, see
// PR #41352 for details).
- let filename = format!("rustc{}_rt.{}", channel, name);
+ let filename = format!("rustc{channel}_rt.{name}");
let path = find_sanitizer_runtime(&sess, &filename);
let rpath = path.to_str().expect("non-utf8 component in path");
linker.args(&["-Wl,-rpath", "-Xlinker", rpath]);
linker.link_dylib(&filename, false, true);
} else {
- let filename = format!("librustc{}_rt.{}.a", channel, name);
+ let filename = format!("librustc{channel}_rt.{name}.a");
let path = find_sanitizer_runtime(&sess, &filename).join(&filename);
linker.link_whole_rlib(&path);
}
@@ -1386,12 +1390,18 @@ enum RlibFlavor {
fn print_native_static_libs(
sess: &Session,
+ out: &OutFileName,
all_native_libs: &[NativeLib],
all_rust_dylibs: &[&Path],
) {
let mut lib_args: Vec<_> = all_native_libs
.iter()
.filter(|l| relevant_lib(sess, l))
+ // Deduplication of successive repeated libraries, see rust-lang/rust#113209
+ //
+ // note: we don't use PartialEq/Eq because NativeLib transitively contains local
+ // elements like spans, which we don't care about and which would make deduplication impossible
+ .dedup_by(|l1, l2| l1.name == l2.name && l1.kind == l2.kind && l1.verbatim == l2.verbatim)
.filter_map(|lib| {
let name = lib.name;
match lib.kind {
@@ -1404,12 +1414,12 @@ fn print_native_static_libs(
} else if sess.target.linker_flavor.is_gnu() {
Some(format!("-l{}{}", if verbatim { ":" } else { "" }, name))
} else {
- Some(format!("-l{}", name))
+ Some(format!("-l{name}"))
}
}
NativeLibKind::Framework { .. } => {
// ld-only syntax, since there are no frameworks in MSVC
- Some(format!("-framework {}", name))
+ Some(format!("-framework {name}"))
}
// These are included, no need to print them
NativeLibKind::Static { bundle: None | Some(true), .. }
@@ -1446,19 +1456,30 @@ fn print_native_static_libs(
// `foo.lib` file if the dll doesn't actually export any symbols, so we
// check to see if the file is there and just omit linking to it if it's
// not present.
- let name = format!("{}.dll.lib", lib);
+ let name = format!("{lib}.dll.lib");
if path.join(&name).exists() {
lib_args.push(name);
}
} else {
- lib_args.push(format!("-l{}", lib));
+ lib_args.push(format!("-l{lib}"));
}
}
- if !lib_args.is_empty() {
- sess.emit_note(errors::StaticLibraryNativeArtifacts);
- // Prefix for greppability
- // Note: This must not be translated as tools are allowed to depend on this exact string.
- sess.note_without_error(format!("native-static-libs: {}", &lib_args.join(" ")));
+
+ match out {
+ OutFileName::Real(path) => {
+ out.overwrite(&lib_args.join(" "), sess);
+ if !lib_args.is_empty() {
+ sess.emit_note(errors::StaticLibraryNativeArtifactsToFile { path });
+ }
+ }
+ OutFileName::Stdout => {
+ if !lib_args.is_empty() {
+ sess.emit_note(errors::StaticLibraryNativeArtifacts);
+ // Prefix for greppability
+ // Note: This must not be translated as tools are allowed to depend on this exact string.
+ sess.note_without_error(format!("native-static-libs: {}", &lib_args.join(" ")));
+ }
+ }
}
}
@@ -1606,8 +1627,8 @@ fn exec_linker(
write!(f, "\"")?;
for c in self.arg.chars() {
match c {
- '"' => write!(f, "\\{}", c)?,
- c => write!(f, "{}", c)?,
+ '"' => write!(f, "\\{c}")?,
+ c => write!(f, "{c}")?,
}
}
write!(f, "\"")?;
@@ -1624,8 +1645,8 @@ fn exec_linker(
// ensure the line is interpreted as one whole argument.
for c in self.arg.chars() {
match c {
- '\\' | ' ' => write!(f, "\\{}", c)?,
- c => write!(f, "{}", c)?,
+ '\\' | ' ' => write!(f, "\\{c}")?,
+ c => write!(f, "{c}")?,
}
}
}
@@ -2262,7 +2283,7 @@ fn add_order_independent_options(
} else {
""
};
- cmd.arg(format!("--dynamic-linker={}ld.so.1", prefix));
+ cmd.arg(format!("--dynamic-linker={prefix}ld.so.1"));
}
if sess.target.eh_frame_header {
@@ -2970,25 +2991,10 @@ fn add_lld_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
return;
}
- let self_contained_linker = sess.opts.cg.link_self_contained.linker();
-
- // FIXME: some targets default to using `lld`, but users can only override the linker on the CLI
- // and cannot yet select the precise linker flavor to opt out of that. See for example issue
- // #113597 for the `thumbv6m-none-eabi` target: a driver is used, and its default linker
- // conflicts with the target's flavor, causing unexpected arguments being passed.
- //
- // Until the new `LinkerFlavor`-like CLI options are stabilized, we only adopt MCP510's behavior
- // if its dedicated unstable CLI flags are used, to keep the current sub-optimal stable
- // behavior.
- let using_mcp510 =
- self_contained_linker || sess.opts.cg.linker_flavor.is_some_and(|f| f.is_unstable());
- if !using_mcp510 && !unstable_use_lld {
- return;
- }
-
// 1. Implement the "self-contained" part of this feature by adding rustc distribution
// directories to the tool's search path.
- if self_contained_linker || unstable_use_lld {
+ let self_contained_linker = sess.opts.cg.link_self_contained.linker() || unstable_use_lld;
+ if self_contained_linker {
for path in sess.get_tools_search_paths(false) {
cmd.arg({
let mut arg = OsString::from("-B");
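Editor's note: the `print_native_static_libs` change earlier in this file collapses successive repeats of the same native library via `dedup_by` (itertools is already a dependency of this crate, per the Cargo.toml hunk above). A small sketch of that adjacent-only deduplication:

use itertools::Itertools;

fn main() {
    let libs = ["z", "z", "png", "z"];
    // dedup_by drops an element only when it equals its immediate predecessor.
    let deduped: Vec<_> = libs.iter().dedup_by(|a, b| a == b).collect();
    // Only the consecutive repeat is dropped; the later "z" is kept.
    assert_eq!(deduped, [&"z", &"png", &"z"]);
}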
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index 8ac86fa4b..11afe0fbc 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -310,7 +310,7 @@ impl<'a> GccLinker<'a> {
self.linker_arg(&format!("-plugin-opt=sample-profile={}", path.display()));
};
self.linker_args(&[
- &format!("-plugin-opt={}", opt_level),
+ &format!("-plugin-opt={opt_level}"),
&format!("-plugin-opt=mcpu={}", self.target_cpu),
]);
}
@@ -488,7 +488,7 @@ impl<'a> Linker for GccLinker<'a> {
fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
self.hint_dynamic();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{lib}"));
}
fn link_framework(&mut self, framework: &str, as_needed: bool) {
@@ -670,8 +670,8 @@ impl<'a> Linker for GccLinker<'a> {
let res: io::Result<()> = try {
let mut f = BufWriter::new(File::create(&path)?);
for sym in symbols {
- debug!(" _{}", sym);
- writeln!(f, "_{}", sym)?;
+ debug!(" _{sym}");
+ writeln!(f, "_{sym}")?;
}
};
if let Err(error) = res {
@@ -685,8 +685,8 @@ impl<'a> Linker for GccLinker<'a> {
// because LD doesn't like when it's empty
writeln!(f, "EXPORTS")?;
for symbol in symbols {
- debug!(" _{}", symbol);
- writeln!(f, " {}", symbol)?;
+ debug!(" _{symbol}");
+ writeln!(f, " {symbol}")?;
}
};
if let Err(error) = res {
@@ -700,8 +700,8 @@ impl<'a> Linker for GccLinker<'a> {
if !symbols.is_empty() {
writeln!(f, " global:")?;
for sym in symbols {
- debug!(" {};", sym);
- writeln!(f, " {};", sym)?;
+ debug!(" {sym};");
+ writeln!(f, " {sym};")?;
}
}
writeln!(f, "\n local:\n *;\n}};")?;
@@ -836,7 +836,7 @@ impl<'a> Linker for MsvcLinker<'a> {
// `foo.lib` file if the dll doesn't actually export any symbols, so we
// check to see if the file is there and just omit linking to it if it's
// not present.
- let name = format!("{}.dll.lib", lib);
+ let name = format!("{lib}.dll.lib");
if path.join(&name).exists() {
self.cmd.arg(name);
}
@@ -976,8 +976,8 @@ impl<'a> Linker for MsvcLinker<'a> {
writeln!(f, "LIBRARY")?;
writeln!(f, "EXPORTS")?;
for symbol in symbols {
- debug!(" _{}", symbol);
- writeln!(f, " {}", symbol)?;
+ debug!(" _{symbol}");
+ writeln!(f, " {symbol}")?;
}
};
if let Err(error) = res {
@@ -991,7 +991,7 @@ impl<'a> Linker for MsvcLinker<'a> {
fn subsystem(&mut self, subsystem: &str) {
// Note that previous passes of the compiler validated this subsystem,
// so we just blindly pass it to the linker.
- self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem));
+ self.cmd.arg(&format!("/SUBSYSTEM:{subsystem}"));
// Windows has two subsystems we're interested in right now, the console
// and windows subsystems. These both implicitly have different entry
@@ -1146,7 +1146,7 @@ impl<'a> Linker for EmLinker<'a> {
&symbols.iter().map(|sym| "_".to_owned() + sym).collect::<Vec<_>>(),
)
.unwrap();
- debug!("{}", encoded);
+ debug!("{encoded}");
arg.push(encoded);
@@ -1349,7 +1349,7 @@ impl<'a> Linker for L4Bender<'a> {
}
fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
self.hint_static();
- self.cmd.arg(format!("-PC{}", lib));
+ self.cmd.arg(format!("-PC{lib}"));
}
fn link_rlib(&mut self, lib: &Path) {
self.hint_static();
@@ -1398,7 +1398,7 @@ impl<'a> Linker for L4Bender<'a> {
fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
self.hint_static();
- self.cmd.arg("--whole-archive").arg(format!("-l{}", lib));
+ self.cmd.arg("--whole-archive").arg(format!("-l{lib}"));
self.cmd.arg("--no-whole-archive");
}
@@ -1452,7 +1452,7 @@ impl<'a> Linker for L4Bender<'a> {
}
fn subsystem(&mut self, subsystem: &str) {
- self.cmd.arg(&format!("--subsystem {}", subsystem));
+ self.cmd.arg(&format!("--subsystem {subsystem}"));
}
fn reset_per_library_state(&mut self) {
@@ -1517,12 +1517,12 @@ impl<'a> AixLinker<'a> {
impl<'a> Linker for AixLinker<'a> {
fn link_dylib(&mut self, lib: &str, _verbatim: bool, _as_needed: bool) {
self.hint_dynamic();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{lib}"));
}
fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
self.hint_static();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{lib}"));
}
fn link_rlib(&mut self, lib: &Path) {
@@ -1572,7 +1572,7 @@ impl<'a> Linker for AixLinker<'a> {
fn link_rust_dylib(&mut self, lib: &str, _: &Path) {
self.hint_dynamic();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{lib}"));
}
fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
@@ -1625,12 +1625,12 @@ impl<'a> Linker for AixLinker<'a> {
let mut f = BufWriter::new(File::create(&path)?);
// FIXME: use llvm-nm to generate export list.
for symbol in symbols {
- debug!(" _{}", symbol);
- writeln!(f, " {}", symbol)?;
+ debug!(" _{symbol}");
+ writeln!(f, " {symbol}")?;
}
};
if let Err(e) = res {
- self.sess.fatal(format!("failed to write export file: {}", e));
+ self.sess.fatal(format!("failed to write export file: {e}"));
}
self.cmd.arg(format!("-bE:{}", path.to_str().unwrap()));
}
@@ -1703,7 +1703,7 @@ fn exported_symbols_for_proc_macro_crate(tcx: TyCtxt<'_>) -> Vec<String> {
return Vec::new();
}
- let stable_crate_id = tcx.sess.local_stable_crate_id();
+ let stable_crate_id = tcx.stable_crate_id(LOCAL_CRATE);
let proc_macro_decls_name = tcx.sess.generate_proc_macro_decls_symbol(stable_crate_id);
let metadata_symbol_name = exported_symbols::metadata_symbol_name(tcx);
@@ -1927,7 +1927,7 @@ impl<'a> Linker for BpfLinker<'a> {
let res: io::Result<()> = try {
let mut f = BufWriter::new(File::create(&path)?);
for sym in symbols {
- writeln!(f, "{}", sym)?;
+ writeln!(f, "{sym}")?;
}
};
if let Err(error) = res {
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
index 00e6acb5c..4c8547407 100644
--- a/compiler/rustc_codegen_ssa/src/back/metadata.rs
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -10,15 +10,13 @@ use object::{
ObjectSymbol, SectionFlags, SectionKind, SymbolFlags, SymbolKind, SymbolScope,
};
-use snap::write::FrameEncoder;
-
-use object::elf::NT_GNU_PROPERTY_TYPE_0;
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::owned_slice::{try_slice_owned, OwnedSlice};
use rustc_metadata::fs::METADATA_FILENAME;
use rustc_metadata::EncodedMetadata;
use rustc_session::cstore::MetadataLoader;
use rustc_session::Session;
+use rustc_span::sym;
use rustc_target::abi::Endian;
use rustc_target::spec::{ef_avr_arch, RelocModel, Target};
@@ -124,7 +122,7 @@ fn add_gnu_property_note(
let mut data: Vec<u8> = Vec::new();
let n_namsz: u32 = 4; // Size of the n_name field
let n_descsz: u32 = 16; // Size of the n_desc field
- let n_type: u32 = NT_GNU_PROPERTY_TYPE_0; // Type of note descriptor
+ let n_type: u32 = object::elf::NT_GNU_PROPERTY_TYPE_0; // Type of note descriptor
let header_values = [n_namsz, n_descsz, n_type];
header_values.iter().for_each(|v| {
data.extend_from_slice(&match endianness {
@@ -134,8 +132,8 @@ fn add_gnu_property_note(
});
data.extend_from_slice(b"GNU\0"); // Owner of the program property note
let pr_type: u32 = match architecture {
- Architecture::X86_64 => 0xc0000002,
- Architecture::Aarch64 => 0xc0000000,
+ Architecture::X86_64 => object::elf::GNU_PROPERTY_X86_FEATURE_1_AND,
+ Architecture::Aarch64 => object::elf::GNU_PROPERTY_AARCH64_FEATURE_1_AND,
_ => unreachable!(),
};
let pr_datasz: u32 = 4; // Size of the pr_data field
@@ -161,20 +159,19 @@ pub(super) fn get_metadata_xcoff<'a>(path: &Path, data: &'a [u8]) -> Result<&'a
{
let offset = metadata_symbol.address() as usize;
if offset < 4 {
- return Err(format!("Invalid metadata symbol offset: {}", offset));
+ return Err(format!("Invalid metadata symbol offset: {offset}"));
}
// The offset specifies the location of rustc metadata in the comment section.
// The metadata is preceded by a 4-byte length field.
let len = u32::from_be_bytes(info_data[(offset - 4)..offset].try_into().unwrap()) as usize;
if offset + len > (info_data.len() as usize) {
return Err(format!(
- "Metadata at offset {} with size {} is beyond .info section",
- offset, len
+ "Metadata at offset {offset} with size {len} is beyond .info section"
));
}
return Ok(&info_data[offset..(offset + len)]);
} else {
- return Err(format!("Unable to find symbol {}", AIX_METADATA_SYMBOL_NAME));
+ return Err(format!("Unable to find symbol {AIX_METADATA_SYMBOL_NAME}"));
};
}
@@ -194,8 +191,8 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
}
"x86" => Architecture::I386,
"s390x" => Architecture::S390x,
- "mips" => Architecture::Mips,
- "mips64" => Architecture::Mips64,
+ "mips" | "mips32r6" => Architecture::Mips,
+ "mips64" | "mips64r6" => Architecture::Mips64,
"x86_64" => {
if sess.target.pointer_width == 32 {
Architecture::X86_64_X32
@@ -213,6 +210,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
"hexagon" => Architecture::Hexagon,
"bpf" => Architecture::Bpf,
"loongarch64" => Architecture::LoongArch64,
+ "csky" => Architecture::Csky,
// Unsupported architecture.
_ => return None,
};
@@ -243,8 +241,16 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
s if s.contains("r6") => elf::EF_MIPS_ARCH_32R6,
_ => elf::EF_MIPS_ARCH_32R2,
};
- // The only ABI LLVM supports for 32-bit MIPS CPUs is o32.
- let mut e_flags = elf::EF_MIPS_CPIC | elf::EF_MIPS_ABI_O32 | arch;
+
+ let mut e_flags = elf::EF_MIPS_CPIC | arch;
+
+ // If the ABI is explicitly given, use it; otherwise default to O32.
+ match sess.target.options.llvm_abiname.to_lowercase().as_str() {
+ "n32" => e_flags |= elf::EF_MIPS_ABI2,
+ "o32" => e_flags |= elf::EF_MIPS_ABI_O32,
+ _ => e_flags |= elf::EF_MIPS_ABI_O32,
+ };
+
if sess.target.options.relocation_model != RelocModel::Static {
e_flags |= elf::EF_MIPS_PIC;
}
@@ -267,35 +273,38 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
Architecture::Riscv32 | Architecture::Riscv64 => {
// Source: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/079772828bd10933d34121117a222b4cc0ee2200/riscv-elf.adoc
let mut e_flags: u32 = 0x0;
- let features = &sess.target.options.features;
+
// Check if compressed is enabled
- if features.contains("+c") {
+ // `unstable_target_features` is used here because "c" is gated behind riscv_target_feature.
+ if sess.unstable_target_features.contains(&sym::c) {
e_flags |= elf::EF_RISCV_RVC;
}
- // Select the appropriate floating-point ABI
- if features.contains("+d") {
- e_flags |= elf::EF_RISCV_FLOAT_ABI_DOUBLE;
- } else if features.contains("+f") {
- e_flags |= elf::EF_RISCV_FLOAT_ABI_SINGLE;
- } else {
- e_flags |= elf::EF_RISCV_FLOAT_ABI_SOFT;
+ // Set the appropriate flag based on ABI
+ // This needs to match LLVM `RISCVELFStreamer.cpp`
+ match &*sess.target.llvm_abiname {
+ "" | "ilp32" | "lp64" => (),
+ "ilp32f" | "lp64f" => e_flags |= elf::EF_RISCV_FLOAT_ABI_SINGLE,
+ "ilp32d" | "lp64d" => e_flags |= elf::EF_RISCV_FLOAT_ABI_DOUBLE,
+ "ilp32e" => e_flags |= elf::EF_RISCV_RVE,
+ _ => bug!("unknown RISC-V ABI name"),
}
+
e_flags
}
Architecture::LoongArch64 => {
// Source: https://github.com/loongson/la-abi-specs/blob/release/laelf.adoc#e_flags-identifies-abi-type-and-version
let mut e_flags: u32 = elf::EF_LARCH_OBJABI_V1;
- let features = &sess.target.options.features;
- // Select the appropriate floating-point ABI
- if features.contains("+d") {
- e_flags |= elf::EF_LARCH_ABI_DOUBLE_FLOAT;
- } else if features.contains("+f") {
- e_flags |= elf::EF_LARCH_ABI_SINGLE_FLOAT;
- } else {
- e_flags |= elf::EF_LARCH_ABI_SOFT_FLOAT;
+ // Set the appropriate flag based on ABI
+ // This needs to match LLVM `LoongArchELFStreamer.cpp`
+ match &*sess.target.llvm_abiname {
+ "ilp32s" | "lp64s" => e_flags |= elf::EF_LARCH_ABI_SOFT_FLOAT,
+ "ilp32f" | "lp64f" => e_flags |= elf::EF_LARCH_ABI_SINGLE_FLOAT,
+ "ilp32d" | "lp64d" => e_flags |= elf::EF_LARCH_ABI_DOUBLE_FLOAT,
+ _ => bug!("unknown RISC-V ABI name"),
}
+
e_flags
}
Architecture::Avr => {
@@ -303,6 +312,13 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
// the appropriate EF_AVR_ARCH flag.
ef_avr_arch(&sess.target.options.cpu)
}
+ Architecture::Csky => {
+ let e_flags = match sess.target.options.abi.as_ref() {
+ "abiv2" => elf::EF_CSKY_ABIV2,
+ _ => elf::EF_CSKY_ABIV1,
+ };
+ e_flags
+ }
_ => 0,
};
// adapted from LLVM's `MCELFObjectTargetWriter::getOSABI`
@@ -474,19 +490,15 @@ pub fn create_compressed_metadata_file(
metadata: &EncodedMetadata,
symbol_name: &str,
) -> Vec<u8> {
- let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
- // Our length will be backfilled once we're done writing
- compressed.write_all(&[0; 4]).unwrap();
- FrameEncoder::new(&mut compressed).write_all(metadata.raw_data()).unwrap();
- let meta_len = rustc_metadata::METADATA_HEADER.len();
- let data_len = (compressed.len() - meta_len - 4) as u32;
- compressed[meta_len..meta_len + 4].copy_from_slice(&data_len.to_be_bytes());
+ let mut packed_metadata = rustc_metadata::METADATA_HEADER.to_vec();
+ packed_metadata.write_all(&(metadata.raw_data().len() as u32).to_be_bytes()).unwrap();
+ packed_metadata.extend(metadata.raw_data());
let Some(mut file) = create_object_file(sess) else {
- return compressed.to_vec();
+ return packed_metadata.to_vec();
};
if file.format() == BinaryFormat::Xcoff {
- return create_compressed_metadata_file_for_xcoff(file, &compressed, symbol_name);
+ return create_compressed_metadata_file_for_xcoff(file, &packed_metadata, symbol_name);
}
let section = file.add_section(
file.segment_name(StandardSegment::Data).to_vec(),
@@ -500,14 +512,14 @@ pub fn create_compressed_metadata_file(
}
_ => {}
};
- let offset = file.append_section_data(section, &compressed, 1);
+ let offset = file.append_section_data(section, &packed_metadata, 1);
// For MachO and probably PE this is necessary to prevent the linker from throwing away the
// .rustc section. For ELF this isn't necessary, but it also doesn't harm.
file.add_symbol(Symbol {
name: symbol_name.as_bytes().to_vec(),
value: offset,
- size: compressed.len() as u64,
+ size: packed_metadata.len() as u64,
kind: SymbolKind::Data,
scope: SymbolScope::Dynamic,
weak: false,
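Editor's note: the `create_compressed_metadata_file` hunk above replaces the snap-compressed payload with a plain packed layout: the metadata header bytes, a big-endian u32 length, then the raw metadata. A minimal sketch of that layout (the real header is `rustc_metadata::METADATA_HEADER`; the bytes below are placeholders):

fn pack_metadata(header: &[u8], raw: &[u8]) -> Vec<u8> {
    let mut packed = header.to_vec();
    // Length of the raw metadata, big-endian, immediately after the header.
    packed.extend_from_slice(&(raw.len() as u32).to_be_bytes());
    packed.extend_from_slice(raw);
    packed
}

fn main() {
    let packed = pack_metadata(b"RUSTHDR\0", b"metadata");
    assert_eq!(&packed[8..12], &8u32.to_be_bytes());
    assert_eq!(&packed[12..], b"metadata");
}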
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath.rs b/compiler/rustc_codegen_ssa/src/back/rpath.rs
index 0b5656c9a..ebf04e7a3 100644
--- a/compiler/rustc_codegen_ssa/src/back/rpath.rs
+++ b/compiler/rustc_codegen_ssa/src/back/rpath.rs
@@ -1,6 +1,7 @@
use pathdiff::diff_paths;
use rustc_data_structures::fx::FxHashSet;
use std::env;
+use std::ffi::OsString;
use std::fs;
use std::path::{Path, PathBuf};
@@ -12,7 +13,7 @@ pub struct RPathConfig<'a> {
pub linker_is_gnu: bool,
}
-pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<String> {
+pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<OsString> {
// No rpath on windows
if !config.has_rpath {
return Vec::new();
@@ -21,36 +22,38 @@ pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<String> {
debug!("preparing the RPATH!");
let rpaths = get_rpaths(config);
- let mut flags = rpaths_to_flags(&rpaths);
+ let mut flags = rpaths_to_flags(rpaths);
if config.linker_is_gnu {
// Use DT_RUNPATH instead of DT_RPATH if available
- flags.push("-Wl,--enable-new-dtags".to_owned());
+ flags.push("-Wl,--enable-new-dtags".into());
// Set DF_ORIGIN for substitute $ORIGIN
- flags.push("-Wl,-z,origin".to_owned());
+ flags.push("-Wl,-z,origin".into());
}
flags
}
-fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
+fn rpaths_to_flags(rpaths: Vec<OsString>) -> Vec<OsString> {
let mut ret = Vec::with_capacity(rpaths.len()); // the minimum needed capacity
for rpath in rpaths {
- if rpath.contains(',') {
+ if rpath.to_string_lossy().contains(',') {
ret.push("-Wl,-rpath".into());
ret.push("-Xlinker".into());
- ret.push(rpath.clone());
+ ret.push(rpath);
} else {
- ret.push(format!("-Wl,-rpath,{}", &(*rpath)));
+ let mut single_arg = OsString::from("-Wl,-rpath,");
+ single_arg.push(rpath);
+ ret.push(single_arg);
}
}
ret
}
-fn get_rpaths(config: &mut RPathConfig<'_>) -> Vec<String> {
+fn get_rpaths(config: &mut RPathConfig<'_>) -> Vec<OsString> {
debug!("output: {:?}", config.out_filename.display());
debug!("libs:");
for libpath in config.libs {
@@ -64,18 +67,18 @@ fn get_rpaths(config: &mut RPathConfig<'_>) -> Vec<String> {
debug!("rpaths:");
for rpath in &rpaths {
- debug!(" {}", rpath);
+ debug!(" {:?}", rpath);
}
// Remove duplicates
minimize_rpaths(&rpaths)
}
-fn get_rpaths_relative_to_output(config: &mut RPathConfig<'_>) -> Vec<String> {
+fn get_rpaths_relative_to_output(config: &mut RPathConfig<'_>) -> Vec<OsString> {
config.libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect()
}
-fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> String {
+fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> OsString {
// Mac doesn't appear to support $ORIGIN
let prefix = if config.is_like_osx { "@loader_path" } else { "$ORIGIN" };
@@ -86,9 +89,12 @@ fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> Str
output.pop(); // strip filename
let output = fs::canonicalize(&output).unwrap_or(output);
let relative = path_relative_from(&lib, &output)
- .unwrap_or_else(|| panic!("couldn't create relative path from {:?} to {:?}", output, lib));
- // FIXME (#9639): This needs to handle non-utf8 paths
- format!("{}/{}", prefix, relative.to_str().expect("non-utf8 component in path"))
+ .unwrap_or_else(|| panic!("couldn't create relative path from {output:?} to {lib:?}"));
+
+ let mut rpath = OsString::from(prefix);
+ rpath.push("/");
+ rpath.push(relative);
+ rpath
}
// This routine is adapted from the *old* Path's `path_relative_from`
@@ -99,7 +105,7 @@ fn path_relative_from(path: &Path, base: &Path) -> Option<PathBuf> {
diff_paths(path, base)
}
-fn minimize_rpaths(rpaths: &[String]) -> Vec<String> {
+fn minimize_rpaths(rpaths: &[OsString]) -> Vec<OsString> {
let mut set = FxHashSet::default();
let mut minimized = Vec::new();
for rpath in rpaths {
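Editor's note: the rpath helpers above now build `OsString` flags so non-UTF-8 paths survive; `get_rpath_relative_to_output` joins the `$ORIGIN`/`@loader_path` prefix and the relative path by pushing into one `OsString` rather than going through `format!`. A short sketch of that joining (the function name here is illustrative):

use std::ffi::OsString;
use std::path::Path;

fn origin_rpath(prefix: &str, relative: &Path) -> OsString {
    let mut rpath = OsString::from(prefix);
    rpath.push("/");
    rpath.push(relative); // keeps the path's raw bytes, no lossy conversion
    rpath
}

fn main() {
    assert_eq!(origin_rpath("$ORIGIN", Path::new("../lib")), "$ORIGIN/../lib");
}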
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
index 604f19144..ac2e54072 100644
--- a/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
+++ b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
@@ -1,32 +1,33 @@
use super::RPathConfig;
use super::{get_rpath_relative_to_output, minimize_rpaths, rpaths_to_flags};
+use std::ffi::OsString;
use std::path::{Path, PathBuf};
#[test]
fn test_rpaths_to_flags() {
- let flags = rpaths_to_flags(&["path1".to_string(), "path2".to_string()]);
+ let flags = rpaths_to_flags(vec!["path1".into(), "path2".into()]);
assert_eq!(flags, ["-Wl,-rpath,path1", "-Wl,-rpath,path2"]);
}
#[test]
fn test_minimize1() {
- let res = minimize_rpaths(&["rpath1".to_string(), "rpath2".to_string(), "rpath1".to_string()]);
+ let res = minimize_rpaths(&["rpath1".into(), "rpath2".into(), "rpath1".into()]);
assert!(res == ["rpath1", "rpath2",]);
}
#[test]
fn test_minimize2() {
let res = minimize_rpaths(&[
- "1a".to_string(),
- "2".to_string(),
- "2".to_string(),
- "1a".to_string(),
- "4a".to_string(),
- "1a".to_string(),
- "2".to_string(),
- "3".to_string(),
- "4a".to_string(),
- "3".to_string(),
+ "1a".into(),
+ "2".into(),
+ "2".into(),
+ "1a".into(),
+ "4a".into(),
+ "1a".into(),
+ "2".into(),
+ "3".into(),
+ "4a".into(),
+ "3".into(),
]);
assert!(res == ["1a", "2", "4a", "3",]);
}
@@ -58,15 +59,15 @@ fn test_rpath_relative() {
#[test]
fn test_xlinker() {
- let args = rpaths_to_flags(&["a/normal/path".to_string(), "a,comma,path".to_string()]);
+ let args = rpaths_to_flags(vec!["a/normal/path".into(), "a,comma,path".into()]);
assert_eq!(
args,
vec![
- "-Wl,-rpath,a/normal/path".to_string(),
- "-Wl,-rpath".to_string(),
- "-Xlinker".to_string(),
- "a,comma,path".to_string()
+ OsString::from("-Wl,-rpath,a/normal/path"),
+ OsString::from("-Wl,-rpath"),
+ OsString::from("-Xlinker"),
+ OsString::from("a,comma,path")
]
);
}
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index a8b6030ac..8fb2ccb7e 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -12,14 +12,14 @@ use rustc_middle::middle::exported_symbols::{
};
use rustc_middle::query::LocalCrate;
use rustc_middle::query::{ExternProviders, Providers};
-use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::Instance;
use rustc_middle::ty::{self, SymbolName, TyCtxt};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
use rustc_session::config::{CrateType, OomStrategy};
use rustc_target::spec::SanitizerSet;
pub fn threshold(tcx: TyCtxt<'_>) -> SymbolExportLevel {
- crates_export_threshold(&tcx.sess.crate_types())
+ crates_export_threshold(tcx.crate_types())
}
fn crate_export_threshold(crate_type: CrateType) -> SymbolExportLevel {
@@ -233,15 +233,6 @@ fn exported_symbols_provider_local(
));
}
- symbols.push((
- ExportedSymbol::NoDefId(SymbolName::new(tcx, OomStrategy::SYMBOL)),
- SymbolExportInfo {
- level: SymbolExportLevel::Rust,
- kind: SymbolExportKind::Text,
- used: false,
- },
- ));
-
let exported_symbol =
ExportedSymbol::NoDefId(SymbolName::new(tcx, NO_ALLOC_SHIM_IS_UNSTABLE));
symbols.push((
@@ -299,8 +290,8 @@ fn exported_symbols_provider_local(
}));
}
- if tcx.sess.crate_types().contains(&CrateType::Dylib)
- || tcx.sess.crate_types().contains(&CrateType::ProcMacro)
+ if tcx.crate_types().contains(&CrateType::Dylib)
+ || tcx.crate_types().contains(&CrateType::ProcMacro)
{
let symbol_name = metadata_symbol_name(tcx);
let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
@@ -328,23 +319,23 @@ fn exported_symbols_provider_local(
let (_, cgus) = tcx.collect_and_partition_mono_items(());
- for (mono_item, &(linkage, visibility)) in cgus.iter().flat_map(|cgu| cgu.items().iter()) {
- if linkage != Linkage::External {
+ for (mono_item, data) in cgus.iter().flat_map(|cgu| cgu.items().iter()) {
+ if data.linkage != Linkage::External {
// We can only re-use things with external linkage, otherwise
// we'll get a linker error
continue;
}
- if need_visibility && visibility == Visibility::Hidden {
+ if need_visibility && data.visibility == Visibility::Hidden {
// If we potentially share things from Rust dylibs, they must
// not be hidden
continue;
}
match *mono_item {
- MonoItem::Fn(Instance { def: InstanceDef::Item(def), substs }) => {
- if substs.non_erasable_generics().next().is_some() {
- let symbol = ExportedSymbol::Generic(def, substs);
+ MonoItem::Fn(Instance { def: InstanceDef::Item(def), args }) => {
+ if args.non_erasable_generics().next().is_some() {
+ let symbol = ExportedSymbol::Generic(def, args);
symbols.push((
symbol,
SymbolExportInfo {
@@ -355,10 +346,10 @@ fn exported_symbols_provider_local(
));
}
}
- MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), substs }) => {
+ MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), args }) => {
// A little sanity-check
debug_assert_eq!(
- substs.non_erasable_generics().next(),
+ args.non_erasable_generics().next(),
Some(GenericArgKind::Type(ty))
);
symbols.push((
@@ -386,7 +377,7 @@ fn exported_symbols_provider_local(
fn upstream_monomorphizations_provider(
tcx: TyCtxt<'_>,
(): (),
-) -> DefIdMap<FxHashMap<SubstsRef<'_>, CrateNum>> {
+) -> DefIdMap<FxHashMap<GenericArgsRef<'_>, CrateNum>> {
let cnums = tcx.crates(());
let mut instances: DefIdMap<FxHashMap<_, _>> = Default::default();
@@ -395,11 +386,11 @@ fn upstream_monomorphizations_provider(
for &cnum in cnums.iter() {
for (exported_symbol, _) in tcx.exported_symbols(cnum).iter() {
- let (def_id, substs) = match *exported_symbol {
- ExportedSymbol::Generic(def_id, substs) => (def_id, substs),
+ let (def_id, args) = match *exported_symbol {
+ ExportedSymbol::Generic(def_id, args) => (def_id, args),
ExportedSymbol::DropGlue(ty) => {
if let Some(drop_in_place_fn_def_id) = drop_in_place_fn_def_id {
- (drop_in_place_fn_def_id, tcx.mk_substs(&[ty.into()]))
+ (drop_in_place_fn_def_id, tcx.mk_args(&[ty.into()]))
} else {
// `drop_in_place` does not exist, don't try
// to use it.
@@ -414,9 +405,9 @@ fn upstream_monomorphizations_provider(
}
};
- let substs_map = instances.entry(def_id).or_default();
+ let args_map = instances.entry(def_id).or_default();
- match substs_map.entry(substs) {
+ match args_map.entry(args) {
Occupied(mut e) => {
// If there are multiple monomorphizations available,
// we select one deterministically.
@@ -438,17 +429,17 @@ fn upstream_monomorphizations_provider(
fn upstream_monomorphizations_for_provider(
tcx: TyCtxt<'_>,
def_id: DefId,
-) -> Option<&FxHashMap<SubstsRef<'_>, CrateNum>> {
+) -> Option<&FxHashMap<GenericArgsRef<'_>, CrateNum>> {
debug_assert!(!def_id.is_local());
tcx.upstream_monomorphizations(()).get(&def_id)
}
fn upstream_drop_glue_for_provider<'tcx>(
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Option<CrateNum> {
if let Some(def_id) = tcx.lang_items().drop_in_place_fn() {
- tcx.upstream_monomorphizations_for(def_id).and_then(|monos| monos.get(&substs).cloned())
+ tcx.upstream_monomorphizations_for(def_id).and_then(|monos| monos.get(&args).cloned())
} else {
None
}
@@ -521,10 +512,10 @@ pub fn symbol_name_for_instance_in_crate<'tcx>(
instantiating_crate,
)
}
- ExportedSymbol::Generic(def_id, substs) => {
+ ExportedSymbol::Generic(def_id, args) => {
rustc_symbol_mangling::symbol_name_for_instance_in_crate(
tcx,
- Instance::new(def_id, substs),
+ Instance::new(def_id, args),
instantiating_crate,
)
}
@@ -533,7 +524,7 @@ pub fn symbol_name_for_instance_in_crate<'tcx>(
tcx,
ty::Instance {
def: ty::InstanceDef::ThreadLocalShim(def_id),
- substs: ty::InternalSubsts::empty(),
+ args: ty::GenericArgs::empty(),
},
instantiating_crate,
)
@@ -580,7 +571,7 @@ pub fn linking_symbol_name_for_instance_in_crate<'tcx>(
None
}
ExportedSymbol::NonGeneric(def_id) => Some(Instance::mono(tcx, def_id)),
- ExportedSymbol::Generic(def_id, substs) => Some(Instance::new(def_id, substs)),
+ ExportedSymbol::Generic(def_id, args) => Some(Instance::new(def_id, args)),
// DropGlue always use the Rust calling convention and thus follow the target's default
// symbol decoration scheme.
ExportedSymbol::DropGlue(..) => None,
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index ececa29b2..f485af00b 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -123,7 +123,7 @@ pub struct ModuleConfig {
impl ModuleConfig {
fn new(
kind: ModuleKind,
- sess: &Session,
+ tcx: TyCtxt<'_>,
no_builtins: bool,
is_compiler_builtins: bool,
) -> ModuleConfig {
@@ -135,6 +135,7 @@ impl ModuleConfig {
};
}
+ let sess = tcx.sess;
let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);
let save_temps = sess.opts.cg.save_temps;
@@ -166,7 +167,7 @@ impl ModuleConfig {
// `#![no_builtins]` is assumed to not participate in LTO and
// instead goes on to generate object code.
EmitObj::Bitcode
- } else if need_bitcode_in_object(sess) {
+ } else if need_bitcode_in_object(tcx) {
EmitObj::ObjectCode(BitcodeSection::Full)
} else {
EmitObj::ObjectCode(BitcodeSection::None)
@@ -349,8 +350,6 @@ pub struct CodegenContext<B: WriteBackendMethods> {
/// Directory into which should the LLVM optimization remarks be written.
/// If `None`, they will be written to stderr.
pub remark_dir: Option<PathBuf>,
- /// Worker thread number
- pub worker: usize,
/// The incremental compilation session directory, or None if we are not
/// compiling incrementally
pub incr_comp_session_dir: Option<PathBuf>,
@@ -362,7 +361,7 @@ pub struct CodegenContext<B: WriteBackendMethods> {
impl<B: WriteBackendMethods> CodegenContext<B> {
pub fn create_diag_handler(&self) -> Handler {
- Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()))
+ Handler::with_emitter(Box::new(self.diag_emitter.clone()))
}
pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
@@ -376,38 +375,39 @@ impl<B: WriteBackendMethods> CodegenContext<B> {
fn generate_lto_work<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
- needs_fat_lto: Vec<FatLTOInput<B>>,
+ needs_fat_lto: Vec<FatLtoInput<B>>,
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(WorkItem<B>, u64)> {
let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work");
- let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
+ if !needs_fat_lto.is_empty() {
assert!(needs_thin_lto.is_empty());
- let lto_module =
+ let module =
B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise());
- (vec![lto_module], vec![])
+ // We are adding a single work item, so the cost doesn't matter.
+ vec![(WorkItem::LTO(module), 0)]
} else {
assert!(needs_fat_lto.is_empty());
- B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise())
- };
-
- lto_modules
- .into_iter()
- .map(|module| {
- let cost = module.cost();
- (WorkItem::LTO(module), cost)
- })
- .chain(copy_jobs.into_iter().map(|wp| {
- (
- WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
- name: wp.cgu_name.clone(),
- source: wp,
- }),
- 0,
- )
- }))
- .collect()
+ let (lto_modules, copy_jobs) = B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules)
+ .unwrap_or_else(|e| e.raise());
+ lto_modules
+ .into_iter()
+ .map(|module| {
+ let cost = module.cost();
+ (WorkItem::LTO(module), cost)
+ })
+ .chain(copy_jobs.into_iter().map(|wp| {
+ (
+ WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
+ name: wp.cgu_name.clone(),
+ source: wp,
+ }),
+ 0, // copying is very cheap
+ )
+ }))
+ .collect()
+ }
}
pub struct CompiledModules {
@@ -415,9 +415,10 @@ pub struct CompiledModules {
pub allocator_module: Option<CompiledModule>,
}
-fn need_bitcode_in_object(sess: &Session) -> bool {
+fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
+ let sess = tcx.sess;
let requested_for_rlib = sess.opts.cg.embed_bitcode
- && sess.crate_types().contains(&CrateType::Rlib)
+ && tcx.crate_types().contains(&CrateType::Rlib)
&& sess.opts.output_types.contains_key(&OutputType::Exe);
let forced_by_target = sess.target.forces_embed_bitcode;
requested_for_rlib || forced_by_target
@@ -451,11 +452,11 @@ pub fn start_async_codegen<B: ExtraBackendMethods>(
let crate_info = CrateInfo::new(tcx, target_cpu);
let regular_config =
- ModuleConfig::new(ModuleKind::Regular, sess, no_builtins, is_compiler_builtins);
+ ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins, is_compiler_builtins);
let metadata_config =
- ModuleConfig::new(ModuleKind::Metadata, sess, no_builtins, is_compiler_builtins);
+ ModuleConfig::new(ModuleKind::Metadata, tcx, no_builtins, is_compiler_builtins);
let allocator_config =
- ModuleConfig::new(ModuleKind::Allocator, sess, no_builtins, is_compiler_builtins);
+ ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins, is_compiler_builtins);
let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
let (codegen_worker_send, codegen_worker_receive) = channel();
@@ -709,7 +710,7 @@ impl<B: WriteBackendMethods> WorkItem<B> {
fn desc(short: &str, _long: &str, name: &str) -> String {
// The short label is three bytes, and is followed by a space. That
// leaves 11 bytes for the CGU name. How we obtain those 11 bytes
- // depends on the the CGU name form.
+ // depends on the CGU name form.
//
// - Non-incremental, e.g. `regex.f10ba03eb5ec7975-cgu.0`: the part
// before the `-cgu.0` is the same for every CGU, so use the
@@ -742,22 +743,32 @@ impl<B: WriteBackendMethods> WorkItem<B> {
}
match self {
- WorkItem::Optimize(m) => desc("opt", "optimize module {}", &m.name),
- WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for {}", &m.name),
- WorkItem::LTO(m) => desc("lto", "LTO module {}", m.name()),
+ WorkItem::Optimize(m) => desc("opt", "optimize module", &m.name),
+ WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for", &m.name),
+ WorkItem::LTO(m) => desc("lto", "LTO module", m.name()),
}
}
}
/// A result produced by the backend.
pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
- Compiled(CompiledModule),
+ /// The backend has finished compiling a CGU, nothing more required.
+ Finished(CompiledModule),
+
+ /// The backend has finished compiling a CGU, which now needs linking
+ /// because `-Zcombine-cgu` was specified.
NeedsLink(ModuleCodegen<B::Module>),
- NeedsFatLTO(FatLTOInput<B>),
- NeedsThinLTO(String, B::ThinBuffer),
+
+ /// The backend has finished compiling a CGU, which now needs to go through
+ /// fat LTO.
+ NeedsFatLto(FatLtoInput<B>),
+
+ /// The backend has finished compiling a CGU, which now needs to go through
+ /// thin LTO.
+ NeedsThinLto(String, B::ThinBuffer),
}
-pub enum FatLTOInput<B: WriteBackendMethods> {
+pub enum FatLtoInput<B: WriteBackendMethods> {
Serialized { name: String, buffer: B::ModuleBuffer },
InMemory(ModuleCodegen<B::Module>),
}
@@ -846,7 +857,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
}
- Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))
+ Ok(WorkItemResult::NeedsThinLto(name, thin_buffer))
}
ComputedLtoType::Fat => match bitcode {
Some(path) => {
@@ -854,9 +865,9 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
fs::write(&path, buffer.data()).unwrap_or_else(|e| {
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
- Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer }))
+ Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized { name, buffer }))
}
- None => Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module))),
+ None => Ok(WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module))),
},
}
}
@@ -906,7 +917,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
load_from_incr_comp_dir(dwarf_obj_out, &saved_dwarf_object_file)
});
- WorkItemResult::Compiled(CompiledModule {
+ WorkItemResult::Finished(CompiledModule {
name: module.name,
kind: ModuleKind::Regular,
object,
@@ -936,7 +947,7 @@ fn finish_intra_module_work<B: ExtraBackendMethods>(
|| module.kind == ModuleKind::Allocator
{
let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? };
- Ok(WorkItemResult::Compiled(module))
+ Ok(WorkItemResult::Finished(module))
} else {
Ok(WorkItemResult::NeedsLink(module))
}
@@ -987,10 +998,15 @@ struct Diagnostic {
}
#[derive(PartialEq, Clone, Copy, Debug)]
-enum MainThreadWorkerState {
+enum MainThreadState {
+ /// Doing nothing.
Idle,
+
+ /// Doing codegen, i.e. MIR-to-LLVM-IR conversion.
Codegenning,
- LLVMing,
+
+ /// Idle, but lending the compiler process's Token to an LLVM thread so it can do useful work.
+ Lending,
}
fn start_executing_work<B: ExtraBackendMethods>(
@@ -1078,7 +1094,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
};
let cgcx = CodegenContext::<B> {
- crate_types: sess.crate_types().to_vec(),
+ crate_types: tcx.crate_types().to_vec(),
each_linked_rlib_for_lto,
lto: sess.lto(),
fewer_names: sess.fewer_names(),
@@ -1089,7 +1105,6 @@ fn start_executing_work<B: ExtraBackendMethods>(
exported_symbols,
remark: sess.opts.cg.remark.clone(),
remark_dir,
- worker: 0,
incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
coordinator_send,
@@ -1242,7 +1257,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
// Each LLVM module is automatically sent back to the coordinator for LTO if
// necessary. There's already optimizations in place to avoid sending work
// back to the coordinator if LTO isn't requested.
- return B::spawn_thread(cgcx.time_trace, move || {
+ return B::spawn_named_thread(cgcx.time_trace, "coordinator".to_string(), move || {
let mut worker_id_counter = 0;
let mut free_worker_ids = Vec::new();
let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
@@ -1285,10 +1300,19 @@ fn start_executing_work<B: ExtraBackendMethods>(
// the implicit Token the compiler process owns no matter what.
let mut tokens = Vec::new();
- let mut main_thread_worker_state = MainThreadWorkerState::Idle;
- let mut running = 0;
+ let mut main_thread_state = MainThreadState::Idle;
+
+ // How many LLVM worker threads are running while holding a Token. This
+ // *excludes* any that the main thread is lending a Token to.
+ let mut running_with_own_token = 0;
+
+ // How many LLVM worker threads are running in total. This *includes*
+ // any that the main thread is lending a Token to.
+ let running_with_any_token = |main_thread_state, running_with_own_token| {
+ running_with_own_token
+ + if main_thread_state == MainThreadState::Lending { 1 } else { 0 }
+ };
- let prof = &cgcx.prof;
let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
// Run the message loop while there's still anything that needs message
@@ -1296,66 +1320,62 @@ fn start_executing_work<B: ExtraBackendMethods>(
// wait for all existing work to finish, so many of the conditions here
// only apply if codegen hasn't been aborted as they represent pending
// work to be done.
- while codegen_state == Ongoing
- || running > 0
- || main_thread_worker_state == MainThreadWorkerState::LLVMing
- || (codegen_state == Completed
- && !(work_items.is_empty()
- && needs_fat_lto.is_empty()
- && needs_thin_lto.is_empty()
- && lto_import_only_modules.is_empty()
- && main_thread_worker_state == MainThreadWorkerState::Idle))
- {
+ loop {
// While there are still CGUs to be codegened, the coordinator has
// to decide how to utilize the compiler processes implicit Token:
// For codegenning more CGU or for running them through LLVM.
if codegen_state == Ongoing {
- if main_thread_worker_state == MainThreadWorkerState::Idle {
+ if main_thread_state == MainThreadState::Idle {
// Compute the number of workers that will be running once we've taken as many
// items from the work queue as we can, plus one for the main thread. It's not
- // critically important that we use this instead of just `running`, but it
- // prevents the `queue_full_enough` heuristic from fluctuating just because a
- // worker finished up and we decreased the `running` count, even though we're
- // just going to increase it right after this when we put a new worker to work.
- let extra_tokens = tokens.len().checked_sub(running).unwrap();
+ // critically important that we use this instead of just
+ // `running_with_own_token`, but it prevents the `queue_full_enough` heuristic
+ // from fluctuating just because a worker finished up and we decreased the
+ // `running_with_own_token` count, even though we're just going to increase it
+ // right after this when we put a new worker to work.
+ let extra_tokens = tokens.len().checked_sub(running_with_own_token).unwrap();
let additional_running = std::cmp::min(extra_tokens, work_items.len());
- let anticipated_running = running + additional_running + 1;
+ let anticipated_running = running_with_own_token + additional_running + 1;
if !queue_full_enough(work_items.len(), anticipated_running) {
// The queue is not full enough, process more codegen units:
if codegen_worker_send.send(CguMessage).is_err() {
panic!("Could not send CguMessage to main thread")
}
- main_thread_worker_state = MainThreadWorkerState::Codegenning;
+ main_thread_state = MainThreadState::Codegenning;
} else {
// The queue is full enough to not let the worker
// threads starve. Use the implicit Token to do some
// LLVM work too.
let (item, _) =
work_items.pop().expect("queue empty - queue_full_enough() broken?");
- let cgcx = CodegenContext {
- worker: get_worker_id(&mut free_worker_ids),
- ..cgcx.clone()
- };
- maybe_start_llvm_timer(
- prof,
- cgcx.config(item.module_kind()),
+ main_thread_state = MainThreadState::Lending;
+ spawn_work(
+ &cgcx,
&mut llvm_start_time,
+ get_worker_id(&mut free_worker_ids),
+ item,
);
- main_thread_worker_state = MainThreadWorkerState::LLVMing;
- spawn_work(cgcx, item);
}
}
} else if codegen_state == Completed {
- // If we've finished everything related to normal codegen
- // then it must be the case that we've got some LTO work to do.
- // Perform the serial work here of figuring out what we're
- // going to LTO and then push a bunch of work items onto our
- // queue to do LTO
- if work_items.is_empty()
- && running == 0
- && main_thread_worker_state == MainThreadWorkerState::Idle
+ if running_with_any_token(main_thread_state, running_with_own_token) == 0
+ && work_items.is_empty()
{
+ // All codegen work is done. Do we have LTO work to do?
+ if needs_fat_lto.is_empty()
+ && needs_thin_lto.is_empty()
+ && lto_import_only_modules.is_empty()
+ {
+ // Nothing more to do!
+ break;
+ }
+
+ // We have LTO work to do. Perform the serial work here of
+ // figuring out what we're going to LTO and then push a
+ // bunch of work items onto our queue to do LTO. This all
+ // happens on the coordinator thread but it's very quick so
+ // we don't worry about tokens.
assert!(!started_lto);
started_lto = true;
@@ -1379,20 +1399,16 @@ fn start_executing_work<B: ExtraBackendMethods>(
// In this branch, we know that everything has been codegened,
// so it's just a matter of determining whether the implicit
// Token is free to use for LLVM work.
- match main_thread_worker_state {
- MainThreadWorkerState::Idle => {
+ match main_thread_state {
+ MainThreadState::Idle => {
if let Some((item, _)) = work_items.pop() {
- let cgcx = CodegenContext {
- worker: get_worker_id(&mut free_worker_ids),
- ..cgcx.clone()
- };
- maybe_start_llvm_timer(
- prof,
- cgcx.config(item.module_kind()),
+ main_thread_state = MainThreadState::Lending;
+ spawn_work(
+ &cgcx,
&mut llvm_start_time,
+ get_worker_id(&mut free_worker_ids),
+ item,
);
- main_thread_worker_state = MainThreadWorkerState::LLVMing;
- spawn_work(cgcx, item);
} else {
// There is no unstarted work, so let the main thread
// take over for a running worker. Otherwise the
@@ -1400,16 +1416,16 @@ fn start_executing_work<B: ExtraBackendMethods>(
// We reduce the `running` counter by one. The
// `tokens.truncate()` below will take care of
// giving the Token back.
- debug_assert!(running > 0);
- running -= 1;
- main_thread_worker_state = MainThreadWorkerState::LLVMing;
+ debug_assert!(running_with_own_token > 0);
+ running_with_own_token -= 1;
+ main_thread_state = MainThreadState::Lending;
}
}
- MainThreadWorkerState::Codegenning => bug!(
+ MainThreadState::Codegenning => bug!(
"codegen worker should not be codegenning after \
codegen was already completed"
),
- MainThreadWorkerState::LLVMing => {
+ MainThreadState::Lending => {
// Already making good use of that token
}
}
@@ -1417,35 +1433,39 @@ fn start_executing_work<B: ExtraBackendMethods>(
// Don't queue up any more work if codegen was aborted, we're
// just waiting for our existing children to finish.
assert!(codegen_state == Aborted);
+ if running_with_any_token(main_thread_state, running_with_own_token) == 0 {
+ break;
+ }
}
// Spin up what work we can, only doing this while we've got available
// parallelism slots and work left to spawn.
- while codegen_state != Aborted && !work_items.is_empty() && running < tokens.len() {
- let (item, _) = work_items.pop().unwrap();
-
- maybe_start_llvm_timer(prof, cgcx.config(item.module_kind()), &mut llvm_start_time);
-
- let cgcx =
- CodegenContext { worker: get_worker_id(&mut free_worker_ids), ..cgcx.clone() };
-
- spawn_work(cgcx, item);
- running += 1;
+ if codegen_state != Aborted {
+ while !work_items.is_empty() && running_with_own_token < tokens.len() {
+ let (item, _) = work_items.pop().unwrap();
+ spawn_work(
+ &cgcx,
+ &mut llvm_start_time,
+ get_worker_id(&mut free_worker_ids),
+ item,
+ );
+ running_with_own_token += 1;
+ }
}
- // Relinquish accidentally acquired extra tokens
- tokens.truncate(running);
+ // Relinquish accidentally acquired extra tokens.
+ tokens.truncate(running_with_own_token);
// If a thread exits successfully then we drop a token associated
- // with that worker and update our `running` count. We may later
- // re-acquire a token to continue running more work. We may also not
- // actually drop a token here if the worker was running with an
- // "ephemeral token"
+ // with that worker and update our `running_with_own_token` count.
+ // We may later re-acquire a token to continue running more work.
+ // We may also not actually drop a token here if the worker was
+ // running with an "ephemeral token".
let mut free_worker = |worker_id| {
- if main_thread_worker_state == MainThreadWorkerState::LLVMing {
- main_thread_worker_state = MainThreadWorkerState::Idle;
+ if main_thread_state == MainThreadState::Lending {
+ main_thread_state = MainThreadState::Idle;
} else {
- running -= 1;
+ running_with_own_token -= 1;
}
free_worker_ids.push(worker_id);
@@ -1461,17 +1481,17 @@ fn start_executing_work<B: ExtraBackendMethods>(
Ok(token) => {
tokens.push(token);
- if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+ if main_thread_state == MainThreadState::Lending {
// If the main thread token is used for LLVM work
// at the moment, we turn that thread into a regular
// LLVM worker thread, so the main thread is free
// to react to codegen demand.
- main_thread_worker_state = MainThreadWorkerState::Idle;
- running += 1;
+ main_thread_state = MainThreadState::Idle;
+ running_with_own_token += 1;
}
}
Err(e) => {
- let msg = &format!("failed to acquire jobserver token: {}", e);
+ let msg = &format!("failed to acquire jobserver token: {e}");
shared_emitter.fatal(msg);
codegen_state = Aborted;
}
@@ -1496,16 +1516,16 @@ fn start_executing_work<B: ExtraBackendMethods>(
if !cgcx.opts.unstable_opts.no_parallel_llvm {
helper.request_token();
}
- assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
- main_thread_worker_state = MainThreadWorkerState::Idle;
+ assert_eq!(main_thread_state, MainThreadState::Codegenning);
+ main_thread_state = MainThreadState::Idle;
}
Message::CodegenComplete => {
if codegen_state != Aborted {
codegen_state = Completed;
}
- assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
- main_thread_worker_state = MainThreadWorkerState::Idle;
+ assert_eq!(main_thread_state, MainThreadState::Codegenning);
+ main_thread_state = MainThreadState::Idle;
}
// If codegen is aborted that means translation was aborted due
@@ -1513,7 +1533,8 @@ fn start_executing_work<B: ExtraBackendMethods>(
// to exit as soon as possible, but we want to make sure all
// existing work has finished. Flag codegen as being done, and
// then conditions above will ensure no more work is spawned but
- // we'll keep executing this loop until `running` hits 0.
+ // we'll keep executing this loop until `running_with_own_token`
+ // hits 0.
Message::CodegenAborted => {
codegen_state = Aborted;
}
@@ -1522,9 +1543,10 @@ fn start_executing_work<B: ExtraBackendMethods>(
free_worker(worker_id);
match result {
- Ok(WorkItemResult::Compiled(compiled_module)) => {
+ Ok(WorkItemResult::Finished(compiled_module)) => {
match compiled_module.kind {
ModuleKind::Regular => {
+ assert!(needs_link.is_empty());
compiled_modules.push(compiled_module);
}
ModuleKind::Allocator => {
@@ -1535,14 +1557,17 @@ fn start_executing_work<B: ExtraBackendMethods>(
}
}
Ok(WorkItemResult::NeedsLink(module)) => {
+ assert!(compiled_modules.is_empty());
needs_link.push(module);
}
- Ok(WorkItemResult::NeedsFatLTO(fat_lto_input)) => {
+ Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => {
assert!(!started_lto);
+ assert!(needs_thin_lto.is_empty());
needs_fat_lto.push(fat_lto_input);
}
- Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer)) => {
+ Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) => {
assert!(!started_lto);
+ assert!(needs_fat_lto.is_empty());
needs_thin_lto.push((name, thin_buffer));
}
Err(Some(WorkerFatalError)) => {
@@ -1560,9 +1585,9 @@ fn start_executing_work<B: ExtraBackendMethods>(
Message::AddImportOnlyModule { module_data, work_product } => {
assert!(!started_lto);
assert_eq!(codegen_state, Ongoing);
- assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+ assert_eq!(main_thread_state, MainThreadState::Codegenning);
lto_import_only_modules.push((module_data, work_product));
- main_thread_worker_state = MainThreadWorkerState::Idle;
+ main_thread_state = MainThreadState::Idle;
}
}
}
@@ -1595,7 +1620,8 @@ fn start_executing_work<B: ExtraBackendMethods>(
modules: compiled_modules,
allocator_module: compiled_allocator_module,
})
- });
+ })
+ .expect("failed to spawn coordinator thread");
// A heuristic that determines if we have enough LLVM WorkItems in the
// queue so that the main thread can do LLVM work instead of codegen
@@ -1653,23 +1679,24 @@ fn start_executing_work<B: ExtraBackendMethods>(
let quarter_of_workers = workers_running - 3 * workers_running / 4;
items_in_queue > 0 && items_in_queue >= quarter_of_workers
}
-
- fn maybe_start_llvm_timer<'a>(
- prof: &'a SelfProfilerRef,
- config: &ModuleConfig,
- llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
- ) {
- if config.time_module && llvm_start_time.is_none() {
- *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes"));
- }
- }
}
/// `FatalError` is explicitly not `Send`.
#[must_use]
pub struct WorkerFatalError;
-fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) {
+fn spawn_work<'a, B: ExtraBackendMethods>(
+ cgcx: &'a CodegenContext<B>,
+ llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
+ worker_id: usize,
+ work: WorkItem<B>,
+) {
+ if cgcx.config(work.module_kind()).time_module && llvm_start_time.is_none() {
+ *llvm_start_time = Some(cgcx.prof.verbose_generic_activity("LLVM_passes"));
+ }
+
+ let cgcx = cgcx.clone();
+
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
// Set up a destructor which will fire off a message that we're done as
// we exit.
@@ -1692,11 +1719,8 @@ fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>
}
}
- let mut bomb = Bomb::<B> {
- coordinator_send: cgcx.coordinator_send.clone(),
- result: None,
- worker_id: cgcx.worker,
- };
+ let mut bomb =
+ Bomb::<B> { coordinator_send: cgcx.coordinator_send.clone(), result: None, worker_id };
// Execute the work itself, and if it finishes successfully then flag
// ourselves as a success as well.
@@ -1728,7 +1752,7 @@ fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>
})
};
})
- .expect("failed to spawn thread");
+ .expect("failed to spawn work thread");
}
enum SharedEmitterMessage {
@@ -1945,6 +1969,10 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> {
self.backend.print_pass_timings()
}
+ if sess.print_llvm_stats() {
+ self.backend.print_statistics()
+ }
+
(
CodegenResults {
metadata: self.metadata,
@@ -1958,19 +1986,6 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> {
)
}
- pub fn submit_pre_codegened_module_to_llvm(
- &self,
- tcx: TyCtxt<'_>,
- module: ModuleCodegen<B::Module>,
- ) {
- self.wait_for_signal_to_codegen_item();
- self.check_for_errors(tcx.sess);
-
- // These are generally cheap and won't throw off scheduling.
- let cost = 0;
- submit_codegened_module_to_llvm(&self.backend, &self.coordinator.sender, module, cost);
- }
-
pub fn codegen_finished(&self, tcx: TyCtxt<'_>) {
self.wait_for_signal_to_codegen_item();
self.check_for_errors(tcx.sess);
@@ -2036,8 +2051,8 @@ pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
})));
}
-pub fn pre_lto_bitcode_filename(module_name: &str) -> String {
- format!("{}.{}", module_name, PRE_LTO_BC_EXT)
+fn pre_lto_bitcode_filename(module_name: &str) -> String {
+ format!("{module_name}.{PRE_LTO_BC_EXT}")
}
fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
@@ -2050,7 +2065,7 @@ fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
);
tcx.sess.target.is_like_windows &&
- tcx.sess.crate_types().iter().any(|ct| *ct == CrateType::Rlib) &&
+ tcx.crate_types().iter().any(|ct| *ct == CrateType::Rlib) &&
// ThinLTO can't handle this workaround in all cases, so we don't
// emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
// dynamic linking when linker plugin LTO is enabled.
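A minimal standalone sketch of the renamed coordinator bookkeeping above, assuming bare counters and a free function in place of the real jobserver state and the in-loop closure: the main thread's implicit Token is either idle, spent on codegen, or lent to an LLVM thread, and only the lent case counts toward the number of LLVM threads running.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum MainThreadState {
    /// Doing nothing.
    Idle,
    /// Doing codegen, i.e. MIR-to-LLVM-IR conversion.
    Codegenning,
    /// Idle, but lending the process's Token to an LLVM thread.
    Lending,
}

/// LLVM threads running under *any* token, own or lent.
fn running_with_any_token(
    main_thread_state: MainThreadState,
    running_with_own_token: usize,
) -> usize {
    running_with_own_token
        + if main_thread_state == MainThreadState::Lending { 1 } else { 0 }
}

fn main() {
    // Two workers hold their own Tokens and the main thread lends its
    // implicit Token to a third LLVM thread: three LLVM threads in total.
    assert_eq!(running_with_any_token(MainThreadState::Lending, 2), 3);
    // While the main thread is codegenning, only the two workers count.
    assert_eq!(running_with_any_token(MainThreadState::Codegenning, 2), 2);
}

With this accounting the coordinator can simply break out of the `loop` once no LLVM thread is running under any token and no work remains, instead of repeating the whole exit condition in a `while` header.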
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 9133601ec..aa003e4e8 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -38,6 +38,7 @@ use rustc_span::symbol::sym;
use rustc_span::Symbol;
use rustc_target::abi::{Align, FIRST_VARIANT};
+use std::cmp;
use std::collections::BTreeSet;
use std::time::{Duration, Instant};
@@ -164,50 +165,27 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
if let Some(entry_idx) = vptr_entry_idx {
- let ptr_ty = cx.type_i8p();
+ let ptr_ty = cx.type_ptr();
let ptr_align = cx.tcx().data_layout.pointer_align.abi;
- let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
- let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
let gep = bx.inbounds_gep(
ptr_ty,
- llvtable,
+ old_info,
&[bx.const_usize(u64::try_from(entry_idx).unwrap())],
);
let new_vptr = bx.load(ptr_ty, gep, ptr_align);
bx.nonnull_metadata(new_vptr);
// VTable loads are invariant.
bx.set_invariant_load(new_vptr);
- bx.pointercast(new_vptr, vtable_ptr_ty)
+ new_vptr
} else {
old_info
}
}
- (_, &ty::Dynamic(ref data, _, target_dyn_kind)) => {
- let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
- cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
- }
+ (_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()),
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
-// Returns the vtable pointer type of a `dyn` or `dyn*` type
-fn vtable_ptr_ty<'tcx, Cx: CodegenMethods<'tcx>>(
- cx: &Cx,
- target: Ty<'tcx>,
- kind: ty::DynKind,
-) -> <Cx as BackendTypes>::Type {
- cx.scalar_pair_element_backend_type(
- cx.layout_of(match kind {
- // vtable is the second field of `*mut dyn Trait`
- ty::Dyn => Ty::new_mut_ptr(cx.tcx(), target),
- // vtable is the second field of `dyn* Trait`
- ty::DynStar => target,
- }),
- 1,
- true,
- )
-}
-
/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
@@ -221,8 +199,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
(&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
| (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
- let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
- (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
+ (src, unsized_info(bx, a, b, old_info))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
@@ -247,11 +224,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
assert_eq!(result, None);
result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
}
- let (lldata, llextra) = result.unwrap();
- let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
- let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
- // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
+ result.unwrap()
}
_ => bug!("unsize_ptr: called on bad types"),
}
@@ -270,11 +243,9 @@ pub fn cast_to_dyn_star<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
"destination type must be a dyn*"
);
- // FIXME(dyn-star): We can remove this when all supported LLVMs use opaque ptrs only.
- let unit_ptr = bx.cx().type_ptr_to(bx.cx().type_struct(&[], false));
let src = match bx.cx().type_kind(bx.cx().backend_type(src_ty_and_layout)) {
- TypeKind::Pointer => bx.pointercast(src, unit_ptr),
- TypeKind::Integer => bx.inttoptr(src, unit_ptr),
+ TypeKind::Pointer => src,
+ TypeKind::Integer => bx.inttoptr(src, bx.type_ptr()),
// FIXME(dyn-star): We probably have to do a bitcast first, then inttoptr.
kind => bug!("unexpected TypeKind for left-hand side of `dyn*` cast: {kind:?}"),
};
@@ -397,11 +368,6 @@ pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
if flags == MemFlags::empty()
&& let Some(bty) = bx.cx().scalar_copy_backend_type(layout)
{
- // I look forward to only supporting opaque pointers
- let pty = bx.type_ptr_to(bty);
- let src = bx.pointercast(src, pty);
- let dst = bx.pointercast(dst, pty);
-
let temp = bx.load(bty, src, src_align);
bx.store(temp, dst, dst_align);
} else {
@@ -455,7 +421,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
// depending on whether the target needs `argc` and `argv` to be passed in.
let llfty = if cx.sess().target.main_needs_argc_argv {
- cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
+ cx.type_func(&[cx.type_int(), cx.type_ptr()], cx.type_int())
} else {
cx.type_func(&[], cx.type_int())
};
@@ -489,7 +455,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx.insert_reference_to_gdb_debug_scripts_section_global();
let isize_ty = cx.type_isize();
- let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
+ let ptr_ty = cx.type_ptr();
let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
@@ -499,7 +465,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx.tcx(),
ty::ParamEnv::reveal_all(),
start_def_id,
- cx.tcx().mk_substs(&[main_ret_ty.into()]),
+ cx.tcx().mk_args(&[main_ret_ty.into()]),
)
.unwrap()
.unwrap(),
@@ -508,12 +474,11 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let i8_ty = cx.type_i8();
let arg_sigpipe = bx.const_u8(sigpipe);
- let start_ty =
- cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
+ let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, ptr_ty, i8_ty], isize_ty);
(start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
} else {
debug!("using user-defined start fn");
- let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
+ let start_ty = cx.type_func(&[isize_ty, ptr_ty], isize_ty);
(rust_main, start_ty, vec![arg_argc, arg_argv])
};
@@ -540,7 +505,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
} else {
// The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
let arg_argc = bx.const_int(cx.type_int(), 0);
- let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
+ let arg_argv = bx.const_null(cx.type_ptr());
(arg_argc, arg_argv)
}
}
@@ -663,9 +628,16 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
)
});
- ongoing_codegen.submit_pre_codegened_module_to_llvm(
- tcx,
+ ongoing_codegen.wait_for_signal_to_codegen_item();
+ ongoing_codegen.check_for_errors(tcx.sess);
+
+ // These modules are generally cheap and won't throw off scheduling.
+ let cost = 0;
+ submit_codegened_module_to_llvm(
+ &backend,
+ &ongoing_codegen.coordinator.sender,
ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator },
+ cost,
);
}
@@ -682,10 +654,10 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
// are large size variations, this can reduce memory usage significantly.
let codegen_units: Vec<_> = {
let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
- sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());
+ sorted_cgus.sort_by_key(|cgu| cmp::Reverse(cgu.size_estimate()));
let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
- second_half.iter().rev().interleave(first_half).copied().collect()
+ first_half.iter().interleave(second_half.iter().rev()).copied().collect()
};
// Calculate the CGU reuse
@@ -760,7 +732,6 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
module,
cost,
);
- false
}
CguReuse::PreLto => {
submit_pre_lto_module_to_llvm(
@@ -772,7 +743,6 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
source: cgu.previous_work_product(tcx),
},
);
- true
}
CguReuse::PostLto => {
submit_post_lto_module_to_llvm(
@@ -783,9 +753,8 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
source: cgu.previous_work_product(tcx),
},
);
- true
}
- };
+ }
}
ongoing_codegen.codegen_finished(tcx);
@@ -810,18 +779,13 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
impl CrateInfo {
pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
- let exported_symbols = tcx
- .sess
- .crate_types()
+ let crate_types = tcx.crate_types().to_vec();
+ let exported_symbols = crate_types
.iter()
.map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
.collect();
- let linked_symbols = tcx
- .sess
- .crate_types()
- .iter()
- .map(|&c| (c, crate::back::linker::linked_symbols(tcx, c)))
- .collect();
+ let linked_symbols =
+ crate_types.iter().map(|&c| (c, crate::back::linker::linked_symbols(tcx, c))).collect();
let local_crate_name = tcx.crate_name(LOCAL_CRATE);
let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
let subsystem = attr::first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
@@ -860,6 +824,7 @@ impl CrateInfo {
let mut info = CrateInfo {
target_cpu,
+ crate_types,
exported_symbols,
linked_symbols,
local_crate_name,
@@ -947,7 +912,7 @@ impl CrateInfo {
});
}
- let embed_visualizers = tcx.sess.crate_types().iter().any(|&crate_type| match crate_type {
+ let embed_visualizers = tcx.crate_types().iter().any(|&crate_type| match crate_type {
CrateType::Executable | CrateType::Dylib | CrateType::Cdylib => {
// These are crate types for which we invoke the linker and can embed
// NatVis visualizers.
@@ -1044,7 +1009,7 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
match compute_per_cgu_lto_type(
&tcx.sess.lto(),
&tcx.sess.opts,
- &tcx.sess.crate_types(),
+ tcx.crate_types(),
ModuleKind::Regular,
) {
ComputedLtoType::No => CguReuse::PostLto,
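A standalone sketch of the new codegen-unit scheduling order from the hunk above, assuming plain usize values in place of CGU size estimates and a hand-rolled loop in place of `Itertools::interleave`: CGUs are sorted largest first, then the large half is interleaved with the small half so LLVM workers always see a mix of long and short jobs.

use std::cmp;

fn schedule_order(mut sizes: Vec<usize>) -> Vec<usize> {
    // Largest CGUs first...
    sizes.sort_by_key(|&s| cmp::Reverse(s));
    // ...then interleave the big half with the small half (smallest first).
    let (first_half, second_half) = sizes.split_at(sizes.len() / 2);
    let mut small = second_half.iter().rev();
    let mut out = Vec::with_capacity(sizes.len());
    for &big in first_half {
        out.push(big);
        if let Some(&s) = small.next() {
            out.push(s);
        }
    }
    out.extend(small.copied());
    out
}

fn main() {
    // Size estimates 3, 9, 1, 5, 7, 2 are scheduled as 9, 1, 7, 2, 5, 3.
    assert_eq!(schedule_order(vec![3, 9, 1, 5, 7, 2]), vec![9, 1, 7, 2, 5, 3]);
}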
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index d6c230127..f6936c80b 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -1,4 +1,4 @@
-use rustc_ast::{ast, MetaItemKind, NestedMetaItem};
+use rustc_ast::{ast, attr, MetaItemKind, NestedMetaItem};
use rustc_attr::{list_contains_name, InlineAttr, InstructionSetAttr, OptimizeAttr};
use rustc_errors::struct_span_err;
use rustc_hir as hir;
@@ -60,6 +60,14 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER;
}
+ // When `no_builtins` is applied at the crate level, we should add the
+ // `no-builtins` attribute to each function to ensure it takes effect in LTO.
+ let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
+ let no_builtins = attr::contains_name(crate_attrs, sym::no_builtins);
+ if no_builtins {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_BUILTINS;
+ }
+
let supported_target_features = tcx.supported_target_features(LOCAL_CRATE);
let mut inline_span = None;
@@ -207,14 +215,19 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
}
sym::thread_local => codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL,
sym::track_caller => {
- if !tcx.is_closure(did.to_def_id())
+ let is_closure = tcx.is_closure(did.to_def_id());
+
+ if !is_closure
&& let Some(fn_sig) = fn_sig()
&& fn_sig.skip_binder().abi() != abi::Abi::Rust
{
struct_span_err!(tcx.sess, attr.span, E0737, "`#[track_caller]` requires Rust ABI")
.emit();
}
- if tcx.is_closure(did.to_def_id()) && !tcx.features().closure_track_caller {
+ if is_closure
+ && !tcx.features().closure_track_caller
+ && !attr.span.allows_unstable(sym::closure_track_caller)
+ {
feature_err(
&tcx.sess.parse_sess,
sym::closure_track_caller,
@@ -493,7 +506,22 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
});
// #73631: closures inherit `#[target_feature]` annotations
- if tcx.features().target_feature_11 && tcx.is_closure(did.to_def_id()) {
+ //
+ // If this closure is marked `#[inline(always)]`, simply skip adding `#[target_feature]`.
+ //
+ // At this point, `unsafe` has already been checked and `#[target_feature]` only affects codegen.
+ // Emitting both `#[inline(always)]` and `#[target_feature]` can potentially result in an
+ // ICE, because LLVM errors when the function fails to be inlined due to a target feature
+ // mismatch.
+ //
+ // Using `#[inline(always)]` implies that this closure will most likely be inlined into
+ // its parent function, which effectively inherits the features anyway. Boxing this closure
+ // would result in this closure being compiled without the inherited target features, but this
+ // is probably a poor usage of `#[inline(always)]` and easily avoided by not using the attribute.
+ if tcx.features().target_feature_11
+ && tcx.is_closure(did.to_def_id())
+ && codegen_fn_attrs.inline != InlineAttr::Always
+ {
let owner_id = tcx.parent(did.to_def_id());
if tcx.def_kind(owner_id).has_codegen_attrs() {
codegen_fn_attrs
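A hypothetical usage sketch of why the new crate-attribute propagation above matters, assuming a `#![no_builtins]` crate compiled with `-Clto`: with this change every function also carries the `no-builtins` LLVM attribute, so LTO is not free to reintroduce calls to routines such as `memcpy` that the crate promised not to depend on.

#![no_builtins]

#[no_mangle]
pub fn copy_bytes(dst: &mut [u8; 32], src: &[u8; 32]) {
    // Without a per-function `no-builtins` attribute, LTO could rewrite this
    // loop as a `memcpy` call even though the crate opted out of builtins.
    for i in 0..32 {
        dst[i] = src[i];
    }
}

fn main() {
    let mut dst = [0u8; 32];
    copy_bytes(&mut dst, &[7u8; 32]);
    assert_eq!(dst, [7u8; 32]);
}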
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
index e91f7b86e..067c824ab 100644
--- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -17,8 +17,8 @@ use rustc_hir::def_id::DefId;
use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathData};
use rustc_hir::{AsyncGeneratorKind, GeneratorKind, Mutability};
use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
-use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::{self, ExistentialProjection, ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
use rustc_target::abi::Integer;
use smallvec::SmallVec;
@@ -77,7 +77,7 @@ fn push_debuginfo_type_name<'tcx>(
ty::Uint(uint_ty) => output.push_str(uint_ty.name_str()),
ty::Float(float_ty) => output.push_str(float_ty.name_str()),
ty::Foreign(def_id) => push_item_name(tcx, def_id, qualified, output),
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
// `layout_for_cpp_like_fallback` will be `Some` if we want to use the fallback encoding.
let layout_for_cpp_like_fallback = if cpp_like_debuginfo && def.is_enum() {
match tcx.layout_of(ParamEnv::reveal_all().and(t)) {
@@ -106,14 +106,14 @@ fn push_debuginfo_type_name<'tcx>(
ty_and_layout,
&|output, visited| {
push_item_name(tcx, def.did(), true, output);
- push_generic_params_internal(tcx, substs, output, visited);
+ push_generic_params_internal(tcx, args, output, visited);
},
output,
visited,
);
} else {
push_item_name(tcx, def.did(), qualified, output);
- push_generic_params_internal(tcx, substs, output, visited);
+ push_generic_params_internal(tcx, args, output, visited);
}
}
ty::Tuple(component_types) => {
@@ -238,7 +238,7 @@ fn push_debuginfo_type_name<'tcx>(
tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), principal);
push_item_name(tcx, principal.def_id, qualified, output);
let principal_has_generic_params =
- push_generic_params_internal(tcx, principal.substs, output, visited);
+ push_generic_params_internal(tcx, principal.args, output, visited);
let projection_bounds: SmallVec<[_; 4]> = trait_data
.projection_bounds()
@@ -393,7 +393,7 @@ fn push_debuginfo_type_name<'tcx>(
// processing
visited.remove(&t);
}
- ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => {
+ ty::Closure(def_id, args) | ty::Generator(def_id, args, ..) => {
// Name will be "{closure_env#0}<T1, T2, ...>", "{generator_env#0}<T1, T2, ...>", or
// "{async_fn_env#0}<T1, T2, ...>", etc.
// In the case of cpp-like debuginfo, the name additionally gets wrapped inside of
@@ -403,18 +403,18 @@ fn push_debuginfo_type_name<'tcx>(
msvc_enum_fallback(
ty_and_layout,
&|output, visited| {
- push_closure_or_generator_name(tcx, def_id, substs, true, output, visited);
+ push_closure_or_generator_name(tcx, def_id, args, true, output, visited);
},
output,
visited,
);
} else {
- push_closure_or_generator_name(tcx, def_id, substs, qualified, output, visited);
+ push_closure_or_generator_name(tcx, def_id, args, qualified, output, visited);
}
}
// Type parameters from polymorphized functions.
ty::Param(_) => {
- write!(output, "{:?}", t).unwrap();
+ write!(output, "{t:?}").unwrap();
}
ty::Error(_)
| ty::Infer(_)
@@ -516,7 +516,7 @@ pub fn compute_debuginfo_vtable_name<'tcx>(
tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), trait_ref);
push_item_name(tcx, trait_ref.def_id, true, &mut vtable_name);
visited.clear();
- push_generic_params_internal(tcx, trait_ref.substs, &mut vtable_name, &mut visited);
+ push_generic_params_internal(tcx, trait_ref.args, &mut vtable_name, &mut visited);
} else {
vtable_name.push('_');
}
@@ -565,9 +565,9 @@ fn push_disambiguated_special_name(
output: &mut String,
) {
if cpp_like_debuginfo {
- write!(output, "{}${}", label, disambiguator).unwrap();
+ write!(output, "{label}${disambiguator}").unwrap();
} else {
- write!(output, "{{{}#{}}}", label, disambiguator).unwrap();
+ write!(output, "{{{label}#{disambiguator}}}").unwrap();
}
}
@@ -609,21 +609,21 @@ fn push_unqualified_item_name(
fn push_generic_params_internal<'tcx>(
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
output: &mut String,
visited: &mut FxHashSet<Ty<'tcx>>,
) -> bool {
- if substs.non_erasable_generics().next().is_none() {
+ if args.non_erasable_generics().next().is_none() {
return false;
}
- debug_assert_eq!(substs, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substs));
+ debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
output.push('<');
- for type_parameter in substs.non_erasable_generics() {
+ for type_parameter in args.non_erasable_generics() {
match type_parameter {
GenericArgKind::Type(type_parameter) => {
push_debuginfo_type_name(tcx, type_parameter, true, output, visited);
@@ -651,15 +651,15 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
ty::Int(ity) => {
let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128;
- write!(output, "{}", val)
+ write!(output, "{val}")
}
ty::Uint(_) => {
let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
- write!(output, "{}", val)
+ write!(output, "{val}")
}
ty::Bool => {
let val = ct.try_eval_bool(tcx, ty::ParamEnv::reveal_all()).unwrap();
- write!(output, "{}", val)
+ write!(output, "{val}")
}
_ => {
// If we cannot evaluate the constant to a known type, we fall back
@@ -678,9 +678,9 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
});
if cpp_like_debuginfo(tcx) {
- write!(output, "CONST${:x}", hash_short)
+ write!(output, "CONST${hash_short:x}")
} else {
- write!(output, "{{CONST#{:x}}}", hash_short)
+ write!(output, "{{CONST#{hash_short:x}}}")
}
}
},
@@ -688,16 +688,20 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
.unwrap();
}
-pub fn push_generic_params<'tcx>(tcx: TyCtxt<'tcx>, substs: SubstsRef<'tcx>, output: &mut String) {
+pub fn push_generic_params<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ args: GenericArgsRef<'tcx>,
+ output: &mut String,
+) {
let _prof = tcx.prof.generic_activity("compute_debuginfo_type_name");
let mut visited = FxHashSet::default();
- push_generic_params_internal(tcx, substs, output, &mut visited);
+ push_generic_params_internal(tcx, args, output, &mut visited);
}
fn push_closure_or_generator_name<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
qualified: bool,
output: &mut String,
visited: &mut FxHashSet<Ty<'tcx>>,
@@ -731,10 +735,10 @@ fn push_closure_or_generator_name<'tcx>(
let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
let generics = tcx.generics_of(enclosing_fn_def_id);
- // Truncate the substs to the length of the above generics. This will cut off
+ // Truncate the args to the length of the above generics. This will cut off
// anything closure- or generator-specific.
- let substs = substs.truncate_to(tcx, generics);
- push_generic_params_internal(tcx, substs, output, visited);
+ let args = args.truncate_to(tcx, generics);
+ push_generic_params_internal(tcx, args, output, visited);
}
fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) {
@@ -748,7 +752,7 @@ fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) {
}
fn pop_close_angle_bracket(output: &mut String) {
- assert!(output.ends_with('>'), "'output' does not end with '>': {}", output);
+ assert!(output.ends_with('>'), "'output' does not end with '>': {output}");
output.pop();
if output.ends_with(' ') {
output.pop();
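A standalone sketch of the two name spellings used by `push_disambiguated_special_name` above, assuming a bare `String` as the output buffer: cpp-like (MSVC/CodeView) debuginfo spells a disambiguated special name as `label$disambiguator`, everything else as `{label#disambiguator}`.

use std::fmt::Write;

fn push_disambiguated_special_name(
    label: &str,
    disambiguator: u32,
    cpp_like_debuginfo: bool,
    output: &mut String,
) {
    if cpp_like_debuginfo {
        write!(output, "{label}${disambiguator}").unwrap();
    } else {
        write!(output, "{{{label}#{disambiguator}}}").unwrap();
    }
}

fn main() {
    let (mut msvc, mut dwarf) = (String::new(), String::new());
    push_disambiguated_special_name("closure_env", 0, true, &mut msvc);
    push_disambiguated_special_name("closure_env", 0, false, &mut dwarf);
    assert_eq!(msvc, "closure_env$0");    // CodeView-style spelling
    assert_eq!(dwarf, "{closure_env#0}"); // DWARF-style spelling
}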
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index 056b4abd2..b7d8b9b45 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -177,31 +177,31 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::NamelessSection(_, offset) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_section_without_name);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::RelocationWithInvalidSymbol(section, offset) => {
diag =
handler.struct_err(fluent::codegen_ssa_thorin_relocation_with_invalid_symbol);
diag.set_arg("section", section);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::MultipleRelocations(section, offset) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_multiple_relocations);
diag.set_arg("section", section);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::UnsupportedRelocation(section, offset) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_unsupported_relocation);
diag.set_arg("section", section);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::MissingDwoName(id) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_missing_dwo_name);
- diag.set_arg("id", format!("0x{:08x}", id));
+ diag.set_arg("id", format!("0x{id:08x}"));
diag
}
thorin::Error::NoCompilationUnits => {
@@ -251,7 +251,7 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::StrAtOffset(_, offset) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_str_at_offset);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::ParseIndex(_, section) => {
@@ -261,7 +261,7 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::UnitNotInIndex(unit) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_unit_not_in_index);
- diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag.set_arg("unit", format!("0x{unit:08x}"));
diag
}
thorin::Error::RowNotInIndex(_, row) => {
@@ -275,7 +275,7 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::EmptyUnit(unit) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_empty_unit);
- diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag.set_arg("unit", format!("0x{unit:08x}"));
diag
}
thorin::Error::MultipleDebugInfoSection => {
@@ -292,12 +292,12 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::DuplicateUnit(unit) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_duplicate_unit);
- diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag.set_arg("unit", format!("0x{unit:08x}"));
diag
}
thorin::Error::MissingReferencedUnit(unit) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_missing_referenced_unit);
- diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag.set_arg("unit", format!("0x{unit:08x}"));
diag
}
thorin::Error::NoOutputObjectCreated => {
@@ -353,7 +353,7 @@ impl IntoDiagnostic<'_> for LinkingFailed<'_> {
let contains_undefined_ref = self.escaped_output.contains("undefined reference to");
- diag.note(format!("{:?}", self.command)).note(self.escaped_output.to_string());
+ diag.note(format!("{:?}", self.command)).note(self.escaped_output);
// Trying to match an error from OS linkers
// which by now we have no way to translate.
@@ -456,6 +456,12 @@ pub struct LinkerFileStem;
pub struct StaticLibraryNativeArtifacts;
#[derive(Diagnostic)]
+#[diag(codegen_ssa_static_library_native_artifacts_to_file)]
+pub struct StaticLibraryNativeArtifactsToFile<'a> {
+ pub path: &'a Path,
+}
+
+#[derive(Diagnostic)]
#[diag(codegen_ssa_link_script_unavailable)]
pub struct LinkScriptUnavailable;
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index be4c81638..7bed3fa61 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -2,7 +2,6 @@
#![feature(associated_type_bounds)]
#![feature(box_patterns)]
#![feature(if_let_guard)]
-#![feature(int_roundings)]
#![feature(let_chains)]
#![feature(negative_impls)]
#![feature(never_type)]
@@ -150,6 +149,7 @@ impl From<&cstore::NativeLib> for NativeLib {
#[derive(Debug, Encodable, Decodable)]
pub struct CrateInfo {
pub target_cpu: String,
+ pub crate_types: Vec<CrateType>,
pub exported_symbols: FxHashMap<CrateType, Vec<String>>,
pub linked_symbols: FxHashMap<CrateType, Vec<(String, SymbolExportKind)>>,
pub local_crate_name: Symbol,
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
index a8b935bd6..12146a54d 100644
--- a/compiler/rustc_codegen_ssa/src/meth.rs
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -1,6 +1,6 @@
use crate::traits::*;
-use rustc_middle::ty::{self, subst::GenericArgKind, Ty};
+use rustc_middle::ty::{self, GenericArgKind, Ty};
use rustc_session::config::Lto;
use rustc_symbol_mangling::typeid_for_trait_ref;
use rustc_target::abi::call::FnAbi;
@@ -23,7 +23,6 @@ impl<'a, 'tcx> VirtualIndex {
// Load the data pointer from the object.
debug!("get_fn({llvtable:?}, {ty:?}, {self:?})");
let llty = bx.fn_ptr_backend_type(fn_abi);
- let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
if bx.cx().sess().opts.unstable_opts.virtual_function_elimination
&& bx.cx().sess().lto() == Lto::Fat
@@ -33,7 +32,7 @@ impl<'a, 'tcx> VirtualIndex {
.unwrap();
let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes();
let func = bx.type_checked_load(llvtable, vtable_byte_offset, typeid);
- bx.pointercast(func, llty)
+ func
} else {
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
@@ -54,7 +53,6 @@ impl<'a, 'tcx> VirtualIndex {
debug!("get_int({:?}, {:?})", llvtable, self);
let llty = bx.type_isize();
- let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
let usize_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
let ptr = bx.load(llty, gep, usize_align);
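A small worked example of the byte-offset arithmetic used for the virtual-function-elimination path above, assuming a 64-bit target where pointers are 8 bytes: the vtable slot index is simply scaled by the pointer size before being handed to `type_checked_load`.

fn vtable_byte_offset(slot_index: u64, pointer_size_bytes: u64) -> u64 {
    slot_index * pointer_size_bytes
}

fn main() {
    // Slot 3 of a vtable on a 64-bit target lives 24 bytes into the vtable.
    assert_eq!(vtable_byte_offset(3, 8), 24);
}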
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 9d1b3ce82..4f26383ed 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -23,6 +23,8 @@ use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;
+use std::cmp;
+
// Indicates if we are in the middle of merging a BB's successor into it. This
// can happen when BB jumps directly to its successor and the successor has no
// other predecessors.
@@ -437,8 +439,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"),
};
let ty = bx.cast_backend_type(cast_ty);
- let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
- bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
+ bx.load(ty, llslot, self.fn_abi.ret.layout.align.abi)
}
};
bx.ret(llval);
@@ -491,7 +492,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
//
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
- substs: drop_fn.substs,
+ args: drop_fn.args,
};
debug!("ty = {:?}", ty);
debug!("drop_fn = {:?}", drop_fn);
@@ -531,7 +532,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// SO THEN WE CAN USE THE ABOVE CODE.
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
- substs: drop_fn.substs,
+ args: drop_fn.args,
};
debug!("ty = {:?}", ty);
debug!("drop_fn = {:?}", drop_fn);
@@ -687,7 +688,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// which mentions the offending type, even from a const context.
let panic_intrinsic = intrinsic.and_then(|s| ValidityRequirement::from_intrinsic(s));
if let Some(requirement) = panic_intrinsic {
- let ty = instance.unwrap().substs.type_at(0);
+ let ty = instance.unwrap().args.type_at(0);
let do_panic = !bx
.tcx()
@@ -701,13 +702,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
with_no_trimmed_paths!({
if layout.abi.is_uninhabited() {
// Use this error even for the other intrinsics as it is more precise.
- format!("attempted to instantiate uninhabited type `{}`", ty)
+ format!("attempted to instantiate uninhabited type `{ty}`")
} else if requirement == ValidityRequirement::Zero {
- format!("attempted to zero-initialize type `{}`, which is invalid", ty)
+ format!("attempted to zero-initialize type `{ty}`, which is invalid")
} else {
format!(
- "attempted to leave type `{}` uninitialized, which is invalid",
- ty
+ "attempted to leave type `{ty}` uninitialized, which is invalid"
)
}
})
@@ -760,13 +760,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let callee = self.codegen_operand(bx, func);
let (instance, mut llfn) = match *callee.layout.ty.kind() {
- ty::FnDef(def_id, substs) => (
+ ty::FnDef(def_id, args) => (
Some(
ty::Instance::expect_resolve(
bx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.polymorphize(bx.tcx()),
),
@@ -851,9 +851,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Some(intrinsic) => {
let dest = match ret_dest {
_ if fn_abi.ret.is_indirect() => llargs[0],
- ReturnDest::Nothing => {
- bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
- }
+ ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
ReturnDest::DirectOperand(_) => {
bug!("Cannot use direct operand with an intrinsic call")
@@ -864,11 +862,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
.iter()
.enumerate()
.map(|(i, arg)| {
- // The indices passed to simd_shuffle* in the
+ // The indices passed to simd_shuffle in the
// third argument must be constant. This is
// checked by const-qualification, which also
// promotes any complex rvalues to constants.
- if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+ if i == 2 && intrinsic == sym::simd_shuffle {
if let mir::Operand::Constant(constant) = arg {
let (llval, ty) = self.simd_shuffle_indices(&bx, constant);
return OperandRef {
@@ -1043,10 +1041,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
assert_eq!(
fn_abi.args.len(),
mir_args + 1,
- "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {:?} {:?} {:?}",
- instance,
- fn_span,
- fn_abi,
+ "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {instance:?} {fn_span:?} {fn_abi:?}",
);
let location =
self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
@@ -1125,12 +1120,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::InlineAsmOperand::SymFn { ref value } => {
let literal = self.monomorphize(value.literal);
- if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ if let ty::FnDef(def_id, args) = *literal.ty().kind() {
let instance = ty::Instance::resolve_for_fn_ptr(
bx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.unwrap();
InlineAsmOperandRef::SymFn { instance }
@@ -1360,36 +1355,58 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Force by-ref if we have to load through a cast pointer.
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => match arg.mode {
- PassMode::Indirect { .. } | PassMode::Cast(..) => {
+ PassMode::Indirect { attrs, .. } => {
+ // Indirect argument may have higher alignment requirements than the type's alignment.
+ // This can happen, e.g. when passing types with <4 byte alignment on the stack on x86.
+ let required_align = match attrs.pointee_align {
+ Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
+ None => arg.layout.align.abi,
+ };
+ let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
+ op.val.store(bx, scratch);
+ (scratch.llval, scratch.align, true)
+ }
+ PassMode::Cast(..) => {
let scratch = PlaceRef::alloca(bx, arg.layout);
op.val.store(bx, scratch);
(scratch.llval, scratch.align, true)
}
_ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
},
- Ref(llval, _, align) => {
- if arg.is_indirect() && align < arg.layout.align.abi {
- // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
- // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
- // have scary latent bugs around.
-
- let scratch = PlaceRef::alloca(bx, arg.layout);
- base::memcpy_ty(
- bx,
- scratch.llval,
- scratch.align,
- llval,
- align,
- op.layout,
- MemFlags::empty(),
- );
- (scratch.llval, scratch.align, true)
- } else {
- (llval, align, true)
+ Ref(llval, _, align) => match arg.mode {
+ PassMode::Indirect { attrs, .. } => {
+ let required_align = match attrs.pointee_align {
+ Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
+ None => arg.layout.align.abi,
+ };
+ if align < required_align {
+ // For `foo(packed.large_field)`, and types with <4 byte alignment on x86,
+ // alignment requirements may be higher than the type's alignment, so copy
+ // to a higher-aligned alloca.
+ let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
+ base::memcpy_ty(
+ bx,
+ scratch.llval,
+ scratch.align,
+ llval,
+ align,
+ op.layout,
+ MemFlags::empty(),
+ );
+ (scratch.llval, scratch.align, true)
+ } else {
+ (llval, align, true)
+ }
}
- }
+ _ => (llval, align, true),
+ },
ZeroSized => match arg.mode {
- PassMode::Indirect { .. } => {
+ PassMode::Indirect { on_stack, .. } => {
+ if on_stack {
+ // It doesn't seem like any target can have `byval` ZSTs, so this assert
+ // is here to replace a would-be untested codepath.
+ bug!("ZST {op:?} passed on stack with abi {arg:?}");
+ }
// Though `extern "Rust"` doesn't pass ZSTs, some ABIs pass
// a pointer for `repr(C)` structs even when empty, so get
// one from an `alloca` (which can be left uninitialized).
@@ -1404,8 +1421,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty, _) = &arg.mode {
let llty = bx.cast_backend_type(ty);
- let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
- llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
+ llval = bx.load(llty, llval, align.min(arg.layout.align.abi));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
@@ -1531,7 +1547,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
let llbb = self.llbb(bb);
if base::wants_new_eh_instructions(self.cx.sess()) {
- let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
+ let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{bb:?}"));
let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
let funclet = cleanup_bx.cleanup_pad(None, &[]);
cleanup_bx.br(llbb);
@@ -1610,7 +1626,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// represents that this is a catch-all block.
bx = Bx::build(self.cx, cp_llbb);
let null =
- bx.const_null(bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space));
+ bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
let sixty_four = bx.const_i32(64);
funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
} else {
@@ -1651,7 +1667,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match self.cached_llbbs[bb] {
CachedLlbb::None => {
// FIXME(eddyb) only name the block if `fewer_names` is `false`.
- let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
+ let llbb = Bx::append_block(self.cx, self.llfn, &format!("{bb:?}"));
self.cached_llbbs[bb] = CachedLlbb::Some(llbb);
Some(llbb)
}
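
The indirect-argument handling above boils down to one alignment rule: spill to a fresh alloca whenever the value's actual alignment is below the larger of the ABI's `pointee_align` and the type's own ABI alignment. A minimal standalone sketch of that rule, using plain byte counts instead of the compiler's `Align` type (helper names are hypothetical):

    use std::cmp;

    /// Alignment the ABI demands for an indirectly-passed argument: the explicit
    /// `pointee_align` attribute when present, but never less than the type's
    /// own ABI alignment.
    fn required_align(pointee_align: Option<u64>, layout_align: u64) -> u64 {
        match pointee_align {
            Some(pointee_align) => cmp::max(pointee_align, layout_align),
            None => layout_align,
        }
    }

    /// True when the value must be copied into a higher-aligned temporary,
    /// e.g. `foo(packed.large_field)` or a <4-byte-aligned type on an x86 ABI
    /// that wants 4-byte alignment.
    fn needs_aligned_copy(actual: u64, pointee_align: Option<u64>, layout_align: u64) -> bool {
        actual < required_align(pointee_align, layout_align)
    }

    fn main() {
        // A field of a #[repr(packed)] struct: 1-byte aligned, ABI wants 4.
        assert!(needs_aligned_copy(1, Some(4), 4));
        // Already sufficiently aligned: the pointer is passed through unchanged.
        assert!(!needs_aligned_copy(8, None, 4));
    }
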
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 1ee89b3d5..526c16a59 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -42,9 +42,6 @@ pub struct PerLocalVarDebugInfo<'tcx, D> {
/// `.place.projection` from `mir::VarDebugInfo`.
pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
-
- /// `references` from `mir::VarDebugInfo`.
- pub references: u8,
}
#[derive(Clone, Copy, Debug)]
@@ -186,7 +183,11 @@ fn calculate_debuginfo_offset<
} => {
let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
let FieldsShape::Array { stride, count: _ } = place.layout().fields else {
- span_bug!(var.source_info.span, "ConstantIndex on non-array type {:?}", place.layout())
+ span_bug!(
+ var.source_info.span,
+ "ConstantIndex on non-array type {:?}",
+ place.layout()
+ )
};
*offset += stride * index;
place = place.project_constant_index(bx, index);
@@ -319,7 +320,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
dbg_var,
fragment: None,
projection: ty::List::empty(),
- references: 0,
})
}
} else {
@@ -328,13 +328,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let local_ref = &self.locals[local];
- // FIXME Should the return place be named?
- let name = if bx.sess().fewer_names() || local == mir::RETURN_PLACE {
+ let name = if bx.sess().fewer_names() {
None
} else {
Some(match whole_local_var.or(fallback_var.clone()) {
Some(var) if var.name != kw::Empty => var.name.to_string(),
- _ => format!("{:?}", local),
+ _ => format!("{local:?}"),
})
};
@@ -396,15 +395,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
&self,
bx: &mut Bx,
local: mir::Local,
- mut base: PlaceRef<'tcx, Bx::Value>,
+ base: PlaceRef<'tcx, Bx::Value>,
var: PerLocalVarDebugInfo<'tcx, Bx::DIVariable>,
) {
let Some(dbg_var) = var.dbg_var else { return };
let Some(dbg_loc) = self.dbg_loc(var.source_info) else { return };
- let DebugInfoOffset { mut direct_offset, indirect_offsets, result: _ } =
+ let DebugInfoOffset { direct_offset, indirect_offsets, result: _ } =
calculate_debuginfo_offset(bx, local, &var, base.layout);
- let mut indirect_offsets = &indirect_offsets[..];
// When targeting MSVC, create extra allocas for arguments instead of pointing multiple
// dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
@@ -418,9 +416,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// LLVM can handle simple things but anything more complex than just a direct
// offset or one indirect offset of 0 is too complex for it to generate CV records
// correctly.
- && (direct_offset != Size::ZERO || !matches!(indirect_offsets, [Size::ZERO] | []));
+ && (direct_offset != Size::ZERO || !matches!(&indirect_offsets[..], [Size::ZERO] | []));
+
+ if should_create_individual_allocas {
+ let DebugInfoOffset { direct_offset: _, indirect_offsets: _, result: place } =
+ calculate_debuginfo_offset(bx, local, &var, base);
- let create_alloca = |bx: &mut Bx, place: PlaceRef<'tcx, Bx::Value>, refcount| {
// Create a variable which will be a pointer to the actual value
let ptr_ty = Ty::new_ptr(
bx.tcx(),
@@ -428,35 +429,30 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
);
let ptr_layout = bx.layout_of(ptr_ty);
let alloca = PlaceRef::alloca(bx, ptr_layout);
- bx.set_var_name(alloca.llval, &format!("{}.ref{}.dbg.spill", var.name, refcount));
+ bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
// Write the pointer to the variable
bx.store(place.llval, alloca.llval, alloca.align);
// Point the debug info to `*alloca` for the current variable
- alloca
- };
-
- if var.references > 0 {
- base = calculate_debuginfo_offset(bx, local, &var, base).result;
-
- // Point the debug info to `&...&base == alloca` for the current variable
- for refcount in 0..var.references {
- base = create_alloca(bx, base, refcount);
- }
-
- direct_offset = Size::ZERO;
- indirect_offsets = &[];
- } else if should_create_individual_allocas {
- let place = calculate_debuginfo_offset(bx, local, &var, base).result;
-
- // Point the debug info to `*alloca` for the current variable
- base = create_alloca(bx, place, 0);
- direct_offset = Size::ZERO;
- indirect_offsets = &[Size::ZERO];
+ bx.dbg_var_addr(
+ dbg_var,
+ dbg_loc,
+ alloca.llval,
+ Size::ZERO,
+ &[Size::ZERO],
+ var.fragment,
+ );
+ } else {
+ bx.dbg_var_addr(
+ dbg_var,
+ dbg_loc,
+ base.llval,
+ direct_offset,
+ &indirect_offsets,
+ var.fragment,
+ );
}
-
- bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, indirect_offsets, None);
}
pub fn debug_introduce_locals(&self, bx: &mut Bx) {
@@ -489,7 +485,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
- let (mut var_ty, var_kind) = match var.value {
+ let (var_ty, var_kind) = match var.value {
mir::VarDebugInfoContents::Place(place) => {
let var_ty = self.monomorphized_place_ty(place.as_ref());
let var_kind = if let Some(arg_index) = var.argument_index
@@ -526,13 +522,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
};
- for _ in 0..var.references {
- var_ty = Ty::new_ptr(
- bx.tcx(),
- ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: var_ty },
- );
- }
-
self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
});
@@ -544,7 +533,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
dbg_var,
fragment: None,
projection: place.projection,
- references: var.references,
});
}
mir::VarDebugInfoContents::Const(c) => {
@@ -586,19 +574,23 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
let place = fragment.contents;
+ let fragment = if fragment_layout.size == Size::ZERO {
+ // Fragment is a ZST, so does not represent anything.
+ continue;
+ } else if fragment_layout.size == var_layout.size {
+ // Fragment covers entire variable, so as far as
+ // DWARF is concerned, it's not really a fragment.
+ None
+ } else {
+ Some(fragment_start..fragment_start + fragment_layout.size)
+ };
+
per_local[place.local].push(PerLocalVarDebugInfo {
name: var.name,
source_info: var.source_info,
dbg_var,
- fragment: if fragment_layout.size == var_layout.size {
- // Fragment covers entire variable, so as far as
- // DWARF is concerned, it's not really a fragment.
- None
- } else {
- Some(fragment_start..fragment_start + fragment_layout.size)
- },
+ fragment,
projection: place.projection,
- references: var.references,
});
}
}
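
The fragment bookkeeping introduced above follows a three-way rule: a zero-sized fragment is dropped, a fragment covering the whole variable is reported to DWARF as no fragment at all, and anything else becomes an explicit byte range. A standalone sketch of that decision with sizes as plain integers (not the compiler's `Size` type):

    use std::ops::Range;

    /// Outer `None`: skip the fragment entirely (it is a ZST).
    /// `Some(None)`: the fragment covers the whole variable, so it is not a fragment.
    /// `Some(Some(range))`: a genuine sub-range of the variable, in bytes.
    fn debuginfo_fragment(start: u64, fragment_size: u64, var_size: u64) -> Option<Option<Range<u64>>> {
        if fragment_size == 0 {
            None
        } else if fragment_size == var_size {
            Some(None)
        } else {
            Some(Some(start..start + fragment_size))
        }
    }

    fn main() {
        assert_eq!(debuginfo_fragment(0, 0, 16), None);
        assert_eq!(debuginfo_fragment(0, 16, 16), Some(None));
        assert_eq!(debuginfo_fragment(8, 4, 16), Some(Some(8..12)));
    }
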
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8a65dd593..8821fb21f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -64,7 +64,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) {
let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
- let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
+ let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
bug!("expected fn item type, found {}", callee_ty);
};
@@ -87,7 +87,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
sym::va_start => bx.va_start(args[0].immediate()),
sym::va_end => bx.va_end(args[0].immediate()),
sym::size_of_val => {
- let tp_ty = substs.type_at(0);
+ let tp_ty = fn_args.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llsize
@@ -96,7 +96,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
sym::min_align_of_val => {
- let tp_ty = substs.type_at(0);
+ let tp_ty = fn_args.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llalign
@@ -136,7 +136,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
}
sym::arith_offset => {
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
let layout = bx.layout_of(ty);
let ptr = args[0].immediate();
let offset = args[1].immediate();
@@ -147,7 +147,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx,
true,
false,
- substs.type_at(0),
+ fn_args.type_at(0),
args[1].immediate(),
args[0].immediate(),
args[2].immediate(),
@@ -158,7 +158,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
memset_intrinsic(
bx,
false,
- substs.type_at(0),
+ fn_args.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
@@ -171,7 +171,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx,
false,
true,
- substs.type_at(0),
+ fn_args.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
@@ -183,7 +183,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx,
true,
true,
- substs.type_at(0),
+ fn_args.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
@@ -194,7 +194,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
memset_intrinsic(
bx,
true,
- substs.type_at(0),
+ fn_args.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
@@ -270,7 +270,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
sym::const_allocate => {
// returns a null pointer at runtime.
- bx.const_null(bx.type_i8p())
+ bx.const_null(bx.type_ptr())
}
sym::const_deallocate => {
@@ -307,17 +307,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let Some((success, failure)) = ordering.split_once('_') else {
bx.sess().emit_fatal(errors::AtomicCompareExchange);
};
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let weak = instruction == "cxchgweak";
- let mut dst = args[0].immediate();
+ let dst = args[0].immediate();
let mut cmp = args[1].immediate();
let mut src = args[2].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
- let ptr_llty = bx.type_ptr_to(bx.type_isize());
- dst = bx.pointercast(dst, ptr_llty);
cmp = bx.ptrtoint(cmp, bx.type_isize());
src = bx.ptrtoint(src, bx.type_isize());
}
@@ -338,17 +336,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
"load" => {
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let layout = bx.layout_of(ty);
let size = layout.size;
- let mut source = args[0].immediate();
+ let source = args[0].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first...
let llty = bx.type_isize();
- let ptr_llty = bx.type_ptr_to(llty);
- source = bx.pointercast(source, ptr_llty);
let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
// ... and then cast the result back to a pointer
bx.inttoptr(result, bx.backend_type(layout))
@@ -361,16 +357,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
"store" => {
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let size = bx.layout_of(ty).size;
let mut val = args[1].immediate();
- let mut ptr = args[0].immediate();
+ let ptr = args[0].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
- let ptr_llty = bx.type_ptr_to(bx.type_isize());
- ptr = bx.pointercast(ptr, ptr_llty);
val = bx.ptrtoint(val, bx.type_isize());
}
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
@@ -407,15 +401,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => bx.sess().emit_fatal(errors::UnknownAtomicOperation),
};
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
- let mut ptr = args[0].immediate();
+ let ptr = args[0].immediate();
let mut val = args[1].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
- let ptr_llty = bx.type_ptr_to(bx.type_isize());
- ptr = bx.pointercast(ptr, ptr_llty);
val = bx.ptrtoint(val, bx.type_isize());
}
bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
@@ -439,7 +431,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
let pointee_size = bx.layout_of(ty).size;
let a = args[0].immediate();
@@ -470,10 +462,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
- let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
- let ptr = bx.pointercast(result.llval, ptr_llty);
- bx.store(llval, ptr, result.align);
+ if let PassMode::Cast(..) = &fn_abi.ret.mode {
+ bx.store(llval, result.llval, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
.val
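
The atomic intrinsics above repeatedly apply the same workaround: platforms without pointer atomics operate on the pointer's address bits as an integer (`ptrtoint` before the operation, `inttoptr` after a load). The shape of that round trip, illustrated with `std::sync::atomic` rather than the codegen backend's API:

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn main() {
        let a = 1u32;
        let b = 2u32;
        let a_addr = &a as *const u32 as usize;
        let b_addr = &b as *const u32 as usize;

        // The "pointer" atomic is really an integer atomic holding address bits.
        let slot = AtomicUsize::new(a_addr);

        // cxchg runs on the integer representation.
        let old = slot
            .compare_exchange(a_addr, b_addr, Ordering::SeqCst, Ordering::SeqCst)
            .expect("current value is a_addr, so the exchange succeeds");
        assert_eq!(old, a_addr);

        // An atomic load hands back the integer; callers convert it back to a pointer.
        assert_eq!(slot.load(Ordering::SeqCst), b_addr);
    }
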
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 9ff6a2497..3464f9108 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -159,7 +159,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
instance: Instance<'tcx>,
) {
- assert!(!instance.substs.has_infer());
+ assert!(!instance.args.has_infer());
let llfn = cx.get_fn(instance);
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 31c293d7c..f90d1a0fc 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -2,7 +2,6 @@ use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
use crate::base;
-use crate::common::TypeKind;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;
@@ -132,7 +131,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) -> Self {
let alloc_align = alloc.inner().align;
assert_eq!(alloc_align, layout.align.abi);
- let ty = bx.type_ptr_to(bx.cx().backend_type(layout));
let read_scalar = |start, size, s: abi::Scalar, ty| {
let val = alloc
@@ -156,7 +154,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
let size = s.size(bx);
assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
- let val = read_scalar(Size::ZERO, size, s, ty);
+ let val = read_scalar(Size::ZERO, size, s, bx.type_ptr());
OperandRef { val: OperandValue::Immediate(val), layout }
}
Abi::ScalarPair(
@@ -187,7 +185,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let base_addr = bx.static_addr_of(init, alloc_align, None);
let llval = bx.const_ptr_byte_offset(base_addr, offset);
- let llval = bx.const_bitcast(llval, ty);
bx.load_operand(PlaceRef::new_sized(llval, layout))
}
}
@@ -314,38 +311,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) => {
// Bools in union fields need to be truncated.
*llval = bx.to_immediate(*llval, field);
- // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- let ty = bx.cx().immediate_backend_type(field);
- if bx.type_kind(ty) == TypeKind::Pointer {
- *llval = bx.pointercast(*llval, ty);
- }
}
(OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
// Bools in union fields need to be truncated.
*a = bx.to_immediate_scalar(*a, a_abi);
*b = bx.to_immediate_scalar(*b, b_abi);
- // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- let a_ty = bx.cx().scalar_pair_element_backend_type(field, 0, true);
- let b_ty = bx.cx().scalar_pair_element_backend_type(field, 1, true);
- if bx.type_kind(a_ty) == TypeKind::Pointer {
- *a = bx.pointercast(*a, a_ty);
- }
- if bx.type_kind(b_ty) == TypeKind::Pointer {
- *b = bx.pointercast(*b, b_ty);
- }
}
// Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
(OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
assert!(matches!(self.layout.abi, Abi::Vector { .. }));
- let llty = bx.cx().backend_type(self.layout);
let llfield_ty = bx.cx().backend_type(field);
// Can't bitcast an aggregate, so round trip through memory.
- let lltemp = bx.alloca(llfield_ty, field.align.abi);
- let llptr = bx.pointercast(lltemp, bx.cx().type_ptr_to(llty));
+ let llptr = bx.alloca(llfield_ty, field.align.abi);
bx.store(*llval, llptr, field.align.abi);
- *llval = bx.load(llfield_ty, lltemp, field.align.abi);
+ *llval = bx.load(llfield_ty, llptr, field.align.abi);
}
(OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
bug!()
@@ -380,9 +361,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
} else {
- let bty = bx.cx().backend_type(layout);
- let ptr_bty = bx.cx().type_ptr_to(bty);
- OperandValue::Ref(bx.const_poison(ptr_bty), None, layout.align.abi)
+ let ptr = bx.cx().type_ptr();
+ OperandValue::Ref(bx.const_poison(ptr), None, layout.align.abi)
}
}
@@ -434,8 +414,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let ty = bx.backend_type(dest.layout);
- let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
- let val = bx.load(ty, ptr, source_align);
+ let val = bx.load(ty, r, source_align);
bx.store_with_flags(val, dest.llval, dest.align, flags);
return;
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index ab493ae5c..e7c3906d9 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -48,9 +48,17 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx: &mut Bx,
layout: TyAndLayout<'tcx>,
) -> Self {
+ Self::alloca_aligned(bx, layout, layout.align.abi)
+ }
+
+ pub fn alloca_aligned<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ align: Align,
+ ) -> Self {
assert!(layout.is_sized(), "tried to statically allocate unsized place");
- let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
- Self::new_sized(tmp, layout)
+ let tmp = bx.alloca(bx.cx().backend_type(layout), align);
+ Self::new_sized_aligned(tmp, layout, align)
}
/// Returns a place for an indirect reference to an unsized place.
@@ -107,8 +115,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
}
Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
// ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
- let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
- bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
+ bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
}
Abi::Scalar(_) | Abi::ScalarPair(..) => {
// All fields of Scalar and ScalarPair layouts must have been handled by this point.
@@ -125,8 +132,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
}
};
PlaceRef {
- // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
- llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+ llval,
llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
layout: field,
align: effective_field_align,
@@ -186,20 +192,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
debug!("struct_field_ptr: DST field offset: {:?}", offset);
- // Cast and adjust pointer.
- let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
- let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
+ // Adjust pointer.
+ let ptr = bx.gep(bx.cx().type_i8(), self.llval, &[offset]);
- // Finally, cast back to the type expected.
- let ll_fty = bx.cx().backend_type(field);
- debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-
- PlaceRef {
- llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
- llextra: self.llextra,
- layout: field,
- align: effective_field_align,
- }
+ PlaceRef { llval: ptr, llextra: self.llextra, layout: field, align: effective_field_align }
}
/// Obtain the actual discriminant of a value.
@@ -408,11 +404,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
) -> Self {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
-
- // Cast to the appropriate variant struct type.
- let variant_ty = bx.cx().backend_type(downcast.layout);
- downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
-
downcast
}
@@ -423,11 +414,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
) -> Self {
let mut downcast = *self;
downcast.layout = bx.cx().layout_of(ty);
-
- // Cast to the appropriate type.
- let variant_ty = bx.cx().backend_type(downcast.layout);
- downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
-
downcast
}
@@ -455,7 +441,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
LocalRef::Place(place) => place,
LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
LocalRef::Operand(..) => {
- if place_ref.has_deref() {
+ if place_ref.is_indirect_first_projection() {
base = 1;
let cg_base = self.codegen_consume(
bx,
@@ -507,13 +493,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
));
}
- // Cast the place pointer type to the new
- // array or slice type (`*[%_; new_len]`).
- subslice.llval = bx.pointercast(
- subslice.llval,
- bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
- );
-
subslice
}
mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
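
With the casts gone, every projection above is just a byte offset applied to the same opaque base pointer (a `gep i8`), never a change of pointee type. A hypothetical standalone illustration of that arithmetic in plain Rust:

    /// Field pointer = base pointer + offset in bytes, the shape of the `gep i8`
    /// emitted above, with no pointercast before or after.
    fn field_ptr<T>(base: *const T, offset_bytes: usize) -> *const u8 {
        base.cast::<u8>().wrapping_add(offset_bytes)
    }

    fn main() {
        #[repr(C)]
        struct Pair {
            a: u32,
            b: u32,
        }
        let p = Pair { a: 1, b: 2 };

        // In this #[repr(C)] layout, `b` sits 4 bytes past the start of the struct.
        let b_ptr = field_ptr(&p as *const Pair, 4) as *const u32;
        assert_eq!(unsafe { *b_ptr }, 2);
    }
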
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 956f03d25..07c61df21 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -182,9 +182,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Immediate(..) | OperandValue::Pair(..) => {
// When we have immediate(s), the alignment of the source is irrelevant,
// so we can store them using the destination's alignment.
- let llty = bx.backend_type(src.layout);
- let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
- src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, dst.align));
+ src.val.store(bx, PlaceRef::new_sized_aligned(dst.llval, src.layout, dst.align));
}
}
}
@@ -222,9 +220,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Ref(ptr, meta, align) => {
debug_assert_eq!(meta, None);
debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
- let cast_bty = bx.backend_type(cast);
- let cast_ptr = bx.pointercast(ptr, bx.type_ptr_to(cast_bty));
- let fake_place = PlaceRef::new_sized_aligned(cast_ptr, cast, align);
+ let fake_place = PlaceRef::new_sized_aligned(ptr, cast, align);
Some(bx.load_operand(fake_place).val)
}
OperandValue::ZeroSized => {
@@ -397,8 +393,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) -> OperandRef<'tcx, Bx::Value> {
assert!(
self.rvalue_creates_operand(rvalue, DUMMY_SP),
- "cannot codegen {:?} to operand",
- rvalue,
+ "cannot codegen {rvalue:?} to operand",
);
match *rvalue {
@@ -417,12 +412,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer) => {
match *operand.layout.ty.kind() {
- ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, args) => {
let instance = ty::Instance::resolve_for_fn_ptr(
bx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.unwrap()
.polymorphize(bx.cx().tcx());
@@ -433,11 +428,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_)) => {
match *operand.layout.ty.kind() {
- ty::Closure(def_id, substs) => {
+ ty::Closure(def_id, args) => {
let instance = Instance::resolve_closure(
bx.cx().tcx(),
def_id,
- substs,
+ args,
ty::ClosureKind::FnOnce,
)
.expect("failed to normalize and resolve closure during codegen")
@@ -480,18 +475,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
{
if let OperandValue::Pair(data_ptr, meta) = operand.val {
if bx.cx().is_backend_scalar_pair(cast) {
- let data_cast = bx.pointercast(
- data_ptr,
- bx.cx().scalar_pair_element_backend_type(cast, 0, true),
- );
- OperandValue::Pair(data_cast, meta)
+ OperandValue::Pair(data_ptr, meta)
} else {
- // cast to thin-ptr
- // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
- // pointer-cast of that pointer to desired pointer type.
- let llcast_ty = bx.cx().immediate_backend_type(cast);
- let llval = bx.pointercast(data_ptr, llcast_ty);
- OperandValue::Immediate(llval)
+ // Cast of fat-ptr to thin-ptr is an extraction of data-ptr.
+ OperandValue::Immediate(data_ptr)
}
} else {
bug!("unexpected non-pair operand");
@@ -711,7 +698,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
{
let instance = ty::Instance {
def: ty::InstanceDef::ThreadLocalShim(def_id),
- substs: ty::InternalSubsts::empty(),
+ args: ty::GenericArgs::empty(),
};
let fn_ptr = bx.get_fn_addr(instance);
let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
@@ -736,13 +723,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
let operand = self.codegen_operand(bx, operand);
- let lloperand = operand.immediate();
+ let val = operand.immediate();
let content_ty = self.monomorphize(content_ty);
let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));
- let llty_ptr = bx.cx().backend_type(box_layout);
- let val = bx.pointercast(lloperand, llty_ptr);
OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
}
}
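
The comment above notes that a fat-pointer-to-thin-pointer cast is now just extraction of the data pointer, with no further cast needed. The same fact is visible at the Rust source level (a small illustration, unrelated to the codegen types):

    fn main() {
        let bytes = [1u8, 2, 3, 4];
        let fat: *const [u8] = &bytes[..]; // data pointer + length metadata
        let thin = fat as *const u8;       // the cast simply discards the metadata
        assert_eq!(thin, bytes.as_ptr());
        assert_eq!(unsafe { *thin }, 1);
    }
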
diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs
index 27da33581..6fbf992ed 100644
--- a/compiler/rustc_codegen_ssa/src/mono_item.rs
+++ b/compiler/rustc_codegen_ssa/src/mono_item.rs
@@ -64,7 +64,7 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
.typeck_body(anon_const.body)
.node_type(anon_const.hir_id);
let instance = match ty.kind() {
- &ty::FnDef(def_id, substs) => Instance::new(def_id, substs),
+ &ty::FnDef(def_id, args) => Instance::new(def_id, args),
_ => span_bug!(*op_sp, "asm sym is not a function"),
};
@@ -138,10 +138,10 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
fn to_raw_string(&self) -> String {
match *self {
MonoItem::Fn(instance) => {
- format!("Fn({:?}, {})", instance.def, instance.substs.as_ptr().addr())
+ format!("Fn({:?}, {})", instance.def, instance.args.as_ptr().addr())
}
- MonoItem::Static(id) => format!("Static({:?})", id),
- MonoItem::GlobalAsm(id) => format!("GlobalAsm({:?})", id),
+ MonoItem::Static(id) => format!("Static({id:?})"),
+ MonoItem::GlobalAsm(id) => format!("GlobalAsm({id:?})"),
}
}
}
diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs
index 9e06fec55..baf6b19d3 100644
--- a/compiler/rustc_codegen_ssa/src/target_features.rs
+++ b/compiler/rustc_codegen_ssa/src/target_features.rs
@@ -29,7 +29,6 @@ const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("aclass", Some(sym::arm_target_feature)),
("aes", Some(sym::arm_target_feature)),
("crc", Some(sym::arm_target_feature)),
- ("crypto", Some(sym::arm_target_feature)),
("d32", Some(sym::arm_target_feature)),
("dotprod", Some(sym::arm_target_feature)),
("dsp", Some(sym::arm_target_feature)),
@@ -297,6 +296,52 @@ const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
const BPF_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[("alu32", Some(sym::bpf_target_feature))];
+const CSKY_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ // tidy-alphabetical-start
+ ("10e60", Some(sym::csky_target_feature)),
+ ("2e3", Some(sym::csky_target_feature)),
+ ("3e3r1", Some(sym::csky_target_feature)),
+ ("3e3r2", Some(sym::csky_target_feature)),
+ ("3e3r3", Some(sym::csky_target_feature)),
+ ("3e7", Some(sym::csky_target_feature)),
+ ("7e10", Some(sym::csky_target_feature)),
+ ("cache", Some(sym::csky_target_feature)),
+ ("doloop", Some(sym::csky_target_feature)),
+ ("dsp1e2", Some(sym::csky_target_feature)),
+ ("dspe60", Some(sym::csky_target_feature)),
+ ("e1", Some(sym::csky_target_feature)),
+ ("e2", Some(sym::csky_target_feature)),
+ ("edsp", Some(sym::csky_target_feature)),
+ ("elrw", Some(sym::csky_target_feature)),
+ ("float1e2", Some(sym::csky_target_feature)),
+ ("float1e3", Some(sym::csky_target_feature)),
+ ("float3e4", Some(sym::csky_target_feature)),
+ ("float7e60", Some(sym::csky_target_feature)),
+ ("floate1", Some(sym::csky_target_feature)),
+ ("hard-tp", Some(sym::csky_target_feature)),
+ ("high-registers", Some(sym::csky_target_feature)),
+ ("hwdiv", Some(sym::csky_target_feature)),
+ ("mp", Some(sym::csky_target_feature)),
+ ("mp1e2", Some(sym::csky_target_feature)),
+ ("nvic", Some(sym::csky_target_feature)),
+ ("trust", Some(sym::csky_target_feature)),
+ ("vdsp2e60f", Some(sym::csky_target_feature)),
+ ("vdspv1", Some(sym::csky_target_feature)),
+ ("vdspv2", Some(sym::csky_target_feature)),
+ // tidy-alphabetical-end
+ // fpu
+ // tidy-alphabetical-start
+ ("fdivdu", Some(sym::csky_target_feature)),
+ ("fpuv2_df", Some(sym::csky_target_feature)),
+ ("fpuv2_sf", Some(sym::csky_target_feature)),
+ ("fpuv3_df", Some(sym::csky_target_feature)),
+ ("fpuv3_hf", Some(sym::csky_target_feature)),
+ ("fpuv3_hi", Some(sym::csky_target_feature)),
+ ("fpuv3_sf", Some(sym::csky_target_feature)),
+ ("hard-float", Some(sym::csky_target_feature)),
+ ("hard-float-abi", Some(sym::csky_target_feature)),
+ // tidy-alphabetical-end
+];
/// When rustdoc is running, provide a list of all known features so that all their respective
/// primitives may be documented.
///
@@ -312,6 +357,7 @@ pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol
.chain(RISCV_ALLOWED_FEATURES.iter())
.chain(WASM_ALLOWED_FEATURES.iter())
.chain(BPF_ALLOWED_FEATURES.iter())
+ .chain(CSKY_ALLOWED_FEATURES)
.cloned()
}
@@ -321,11 +367,12 @@ pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Opt
"aarch64" => AARCH64_ALLOWED_FEATURES,
"x86" | "x86_64" => X86_ALLOWED_FEATURES,
"hexagon" => HEXAGON_ALLOWED_FEATURES,
- "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
+ "mips" | "mips32r6" | "mips64" | "mips64r6" => MIPS_ALLOWED_FEATURES,
"powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
"riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
"wasm32" | "wasm64" => WASM_ALLOWED_FEATURES,
"bpf" => BPF_ALLOWED_FEATURES,
+ "csky" => CSKY_ALLOWED_FEATURES,
_ => &[],
}
}
@@ -369,13 +416,9 @@ pub fn from_target_feature(
// We allow comma separation to enable multiple features.
target_features.extend(value.as_str().split(',').filter_map(|feature| {
let Some(feature_gate) = supported_target_features.get(feature) else {
- let msg =
- format!("the feature named `{}` is not valid for this target", feature);
+ let msg = format!("the feature named `{feature}` is not valid for this target");
let mut err = tcx.sess.struct_span_err(item.span(), msg);
- err.span_label(
- item.span(),
- format!("`{}` is not valid for this target", feature),
- );
+ err.span_label(item.span(), format!("`{feature}` is not valid for this target"));
if let Some(stripped) = feature.strip_prefix('+') {
let valid = supported_target_features.contains_key(stripped);
if valid {
@@ -401,6 +444,7 @@ pub fn from_target_feature(
Some(sym::ermsb_target_feature) => rust_features.ermsb_target_feature,
Some(sym::bpf_target_feature) => rust_features.bpf_target_feature,
Some(sym::aarch64_ver_target_feature) => rust_features.aarch64_ver_target_feature,
+ Some(sym::csky_target_feature) => rust_features.csky_target_feature,
Some(name) => bug!("unknown target feature gate {}", name),
None => true,
};
@@ -409,7 +453,7 @@ pub fn from_target_feature(
&tcx.sess.parse_sess,
feature_gate.unwrap(),
item.span(),
- format!("the target feature `{}` is currently unstable", feature),
+ format!("the target feature `{feature}` is currently unstable"),
)
.emit();
}
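
A reduced sketch of the `#[target_feature(enable = "...")]` handling above: the attribute value is split on commas and each name is checked against the per-architecture allowlist, with unknown names collected for diagnostics. Plain strings stand in for `Symbol`, and the allowlist here is made up:

    use std::collections::HashMap;

    /// Split a comma-separated feature string and partition it into allowed and
    /// unknown features; the gating and error reporting are omitted.
    fn parse_features<'a>(
        value: &'a str,
        allowed: &HashMap<&str, bool>, // feature name -> is stable
    ) -> (Vec<&'a str>, Vec<&'a str>) {
        let mut accepted = Vec::new();
        let mut unknown = Vec::new();
        for feature in value.split(',') {
            match allowed.get(feature) {
                Some(_stable) => accepted.push(feature),
                None => unknown.push(feature),
            }
        }
        (accepted, unknown)
    }

    fn main() {
        let allowed = HashMap::from([("crc", true), ("dotprod", false)]);
        let (ok, bad) = parse_features("crc,neon-fake,dotprod", &allowed);
        assert_eq!(ok, ["crc", "dotprod"]);
        assert_eq!(bad, ["neon-fake"]);
    }
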
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index b3c9ecf8b..0a02ca6b3 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -23,6 +23,8 @@ use rustc_span::symbol::Symbol;
use rustc_target::abi::call::FnAbi;
use rustc_target::spec::Target;
+use std::fmt;
+
pub trait BackendTypes {
type Value: CodegenObject;
type Function: CodegenObject;
@@ -61,7 +63,7 @@ pub trait CodegenBackend {
fn locale_resource(&self) -> &'static str;
fn init(&self, _sess: &Session) {}
- fn print(&self, _req: PrintRequest, _sess: &Session) {}
+ fn print(&self, _req: &PrintRequest, _out: &mut dyn PrintBackendInfo, _sess: &Session) {}
fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<Symbol> {
vec![]
}
@@ -140,15 +142,6 @@ pub trait ExtraBackendMethods:
target_features: &[String],
) -> TargetMachineFactoryFn<Self>;
- fn spawn_thread<F, T>(_time_trace: bool, f: F) -> std::thread::JoinHandle<T>
- where
- F: FnOnce() -> T,
- F: Send + 'static,
- T: Send + 'static,
- {
- std::thread::spawn(f)
- }
-
fn spawn_named_thread<F, T>(
_time_trace: bool,
name: String,
@@ -162,3 +155,19 @@ pub trait ExtraBackendMethods:
std::thread::Builder::new().name(name).spawn(f)
}
}
+
+pub trait PrintBackendInfo {
+ fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>);
+}
+
+impl PrintBackendInfo for String {
+ fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>) {
+ fmt::Write::write_fmt(self, args).unwrap();
+ }
+}
+
+impl dyn PrintBackendInfo + '_ {
+ pub fn write_fmt(&mut self, args: fmt::Arguments<'_>) {
+ self.infallible_write_fmt(args);
+ }
+}
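
The new `PrintBackendInfo` trait above relies on a small trick: an inherent `write_fmt` on the `dyn` trait object lets callers use the ordinary `write!` macro even though the trait only exposes an infallible method. The pattern in isolation (trait and impls copied in shape only, outside the compiler's crates):

    use std::fmt;

    pub trait PrintBackendInfo {
        fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>);
    }

    impl PrintBackendInfo for String {
        fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>) {
            // Writing into a String cannot fail, so unwrapping is fine.
            fmt::Write::write_fmt(self, args).unwrap();
        }
    }

    impl dyn PrintBackendInfo + '_ {
        // `write!(out, ...)` expands to `out.write_fmt(...)`, so this inherent
        // method makes the macro work on `&mut dyn PrintBackendInfo` without a Result.
        pub fn write_fmt(&mut self, args: fmt::Arguments<'_>) {
            self.infallible_write_fmt(args);
        }
    }

    fn main() {
        let mut buf = String::new();
        let out: &mut dyn PrintBackendInfo = &mut buf;
        write!(out, "target: {}", "x86_64-unknown-linux-gnu");
        assert_eq!(buf, "target: x86_64-unknown-linux-gnu");
    }
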
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
index d6e9bfce1..4dff9c768 100644
--- a/compiler/rustc_codegen_ssa/src/traits/consts.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -5,7 +5,13 @@ use rustc_target::abi;
pub trait ConstMethods<'tcx>: BackendTypes {
// Constant constructors
fn const_null(&self, t: Self::Type) -> Self::Value;
+ /// Generate an uninitialized value (matching uninitialized memory in MIR).
+ /// Whether memory is initialized or not is tracked byte-for-byte.
fn const_undef(&self, t: Self::Type) -> Self::Value;
+ /// Generate a fake value. Poison always affects the entire value, even if just a single byte is
+ /// poison. This can only be used in codepaths that are already UB, i.e., UB-free Rust code
+ /// (including code that e.g. copies uninit memory with `MaybeUninit`) can never encounter a
+ /// poison value.
fn const_poison(&self, t: Self::Type) -> Self::Value;
fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
@@ -30,7 +36,6 @@ pub trait ConstMethods<'tcx>: BackendTypes {
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
- fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn const_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn const_ptr_byte_offset(&self, val: Self::Value, offset: abi::Size) -> Self::Value;
}
diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs
index 8cb58bd4c..728c2bc8c 100644
--- a/compiler/rustc_codegen_ssa/src/traits/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs
@@ -30,7 +30,9 @@ mod write;
pub use self::abi::AbiBuilderMethods;
pub use self::asm::{AsmBuilderMethods, AsmMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
-pub use self::backend::{Backend, BackendTypes, CodegenBackend, ExtraBackendMethods};
+pub use self::backend::{
+ Backend, BackendTypes, CodegenBackend, ExtraBackendMethods, PrintBackendInfo,
+};
pub use self::builder::{BuilderMethods, OverflowOp};
pub use self::consts::ConstMethods;
pub use self::coverageinfo::CoverageInfoBuilderMethods;
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
index e64417e1a..dc3dbd9d8 100644
--- a/compiler/rustc_codegen_ssa/src/traits/type_.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -26,8 +26,8 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
fn type_kind(&self, ty: Self::Type) -> TypeKind;
- fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
- fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type;
+ fn type_ptr(&self) -> Self::Type;
+ fn type_ptr_ext(&self, address_space: AddressSpace) -> Self::Type;
fn element_type(&self, ty: Self::Type) -> Self::Type;
/// Returns the number of elements in `self` if it is a LLVM vector type.
@@ -42,14 +42,6 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
}
pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
- fn type_i8p(&self) -> Self::Type {
- self.type_i8p_ext(AddressSpace::DATA)
- }
-
- fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type {
- self.type_ptr_to_ext(self.type_i8(), address_space)
- }
-
fn type_int(&self) -> Self::Type {
match &self.sess().target.c_int_width[..] {
"16" => self.type_i16(),
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
index 9826256a4..ecf5095d8 100644
--- a/compiler/rustc_codegen_ssa/src/traits/write.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -1,5 +1,5 @@
use crate::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
-use crate::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig};
use crate::{CompiledModule, ModuleCodegen};
use rustc_errors::{FatalError, Handler};
@@ -23,7 +23,7 @@ pub trait WriteBackendMethods: 'static + Sized + Clone {
/// for further optimization.
fn run_fat_lto(
cgcx: &CodegenContext<Self>,
- modules: Vec<FatLTOInput<Self>>,
+ modules: Vec<FatLtoInput<Self>>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<Self>, FatalError>;
/// Performs thin LTO by performing necessary global analysis and returning two
@@ -35,6 +35,7 @@ pub trait WriteBackendMethods: 'static + Sized + Clone {
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
fn print_pass_timings(&self);
+ fn print_statistics(&self);
unsafe fn optimize(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
diff --git a/compiler/rustc_const_eval/Cargo.toml b/compiler/rustc_const_eval/Cargo.toml
index 74030a43c..4e47fed86 100644
--- a/compiler/rustc_const_eval/Cargo.toml
+++ b/compiler/rustc_const_eval/Cargo.toml
@@ -8,7 +8,7 @@ edition = "2021"
[dependencies]
tracing = "0.1"
either = "1"
-rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_apfloat = "0.2.0"
rustc_ast = { path = "../rustc_ast" }
rustc_attr = { path = "../rustc_attr" }
rustc_data_structures = { path = "../rustc_data_structures" }
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl
index e99005316..e5dd5729d 100644
--- a/compiler/rustc_const_eval/messages.ftl
+++ b/compiler/rustc_const_eval/messages.ftl
@@ -15,9 +15,6 @@ const_eval_await_non_const =
cannot convert `{$ty}` into a future in {const_eval_const_context}s
const_eval_bounds_check_failed =
indexing out of bounds: the len is {$len} but the index is {$index}
-const_eval_box_to_mut = {$front_matter}: encountered a box pointing to mutable memory in a constant
-const_eval_box_to_static = {$front_matter}: encountered a box pointing to a static variable in a constant
-const_eval_box_to_uninhabited = {$front_matter}: encountered a box pointing to uninhabited type {$ty}
const_eval_call_nonzero_intrinsic =
`{$name}` called on 0
@@ -41,18 +38,12 @@ const_eval_const_context = {$kind ->
const_eval_copy_nonoverlapping_overlapping =
`copy_nonoverlapping` called on overlapping ranges
-const_eval_dangling_box_no_provenance = {$front_matter}: encountered a dangling box ({$pointer} has no provenance)
-const_eval_dangling_box_out_of_bounds = {$front_matter}: encountered a dangling box (going beyond the bounds of its allocation)
-const_eval_dangling_box_use_after_free = {$front_matter}: encountered a dangling box (use-after-free)
const_eval_dangling_int_pointer =
{$bad_pointer_message}: {$pointer} is a dangling pointer (it has no provenance)
const_eval_dangling_null_pointer =
{$bad_pointer_message}: null pointer is a dangling pointer (it has no provenance)
const_eval_dangling_ptr_in_final = encountered dangling pointer in final constant
-const_eval_dangling_ref_no_provenance = {$front_matter}: encountered a dangling reference ({$pointer} has no provenance)
-const_eval_dangling_ref_out_of_bounds = {$front_matter}: encountered a dangling reference (going beyond the bounds of its allocation)
-const_eval_dangling_ref_use_after_free = {$front_matter}: encountered a dangling reference (use-after-free)
const_eval_dead_local =
accessing a dead local variable
const_eval_dealloc_immutable =
@@ -105,7 +96,6 @@ const_eval_error = {$error_kind ->
const_eval_exact_div_has_remainder =
exact_div: {$a} cannot be divided by {$b} without remainder
-const_eval_expected_non_ptr = {$front_matter}: encountered `{$value}`, but expected plain (non-pointer) bytes
const_eval_fn_ptr_call =
function pointers need an RFC before allowed to be called in {const_eval_const_context}s
const_eval_for_loop_into_iter_non_const =
@@ -156,8 +146,6 @@ const_eval_invalid_align_details =
const_eval_invalid_bool =
interpreting an invalid 8-bit value as a bool: 0x{$value}
-const_eval_invalid_box_meta = {$front_matter}: encountered invalid box metadata: total size is bigger than largest supported object
-const_eval_invalid_box_slice_meta = {$front_matter}: encountered invalid box metadata: slice is bigger than largest supported object
const_eval_invalid_char =
interpreting an invalid 32-bit value as a char: 0x{$value}
const_eval_invalid_dealloc =
@@ -168,16 +156,12 @@ const_eval_invalid_dealloc =
*[other] {""}
}
-const_eval_invalid_enum_tag = {$front_matter}: encountered {$value}, but expected a valid enum tag
-const_eval_invalid_fn_ptr = {$front_matter}: encountered {$value}, but expected a function pointer
const_eval_invalid_function_pointer =
using {$pointer} as function pointer but it does not point to a function
const_eval_invalid_meta =
invalid metadata in wide pointer: total size is bigger than largest supported object
const_eval_invalid_meta_slice =
invalid metadata in wide pointer: slice is bigger than largest supported object
-const_eval_invalid_ref_meta = {$front_matter}: encountered invalid reference metadata: total size is bigger than largest supported object
-const_eval_invalid_ref_slice_meta = {$front_matter}: encountered invalid reference metadata: slice is bigger than largest supported object
const_eval_invalid_str =
this string is not valid UTF-8: {$err}
const_eval_invalid_tag =
@@ -189,14 +173,10 @@ const_eval_invalid_uninit_bytes =
reading memory at {$alloc}{$access}, but memory is uninitialized at {$uninit}, and this operation requires initialized memory
const_eval_invalid_uninit_bytes_unknown =
using uninitialized data, but this operation requires initialized memory
-const_eval_invalid_value = constructing invalid value
-const_eval_invalid_value_with_path = constructing invalid value at {$path}
-## The `front_matter`s here refer to either `middle_invalid_value` or `middle_invalid_value_with_path`.
const_eval_invalid_vtable_pointer =
using {$pointer} as vtable pointer but it does not point to a vtable
-const_eval_invalid_vtable_ptr = {$front_matter}: encountered {$value}, but expected a vtable pointer
const_eval_live_drop =
destructor of `{$dropped_ty}` cannot be evaluated at compile-time
@@ -218,14 +198,13 @@ const_eval_max_num_nodes_in_const = maximum number of nodes exceeded in constant
const_eval_memory_access_test = memory access failed
const_eval_memory_exhausted =
tried to allocate more memory than available to compiler
+
const_eval_modified_global =
modifying a static's initial value from another static's initializer
const_eval_mut_deref =
mutation through a reference is not allowed in {const_eval_const_context}s
-const_eval_mutable_ref_in_const = {$front_matter}: encountered mutable reference in a `const`
-const_eval_never_val = {$front_matter}: encountered a value of the never type `!`
const_eval_non_const_fmt_macro_call =
cannot call non-const formatting macro in {const_eval_const_context}s
@@ -241,10 +220,6 @@ const_eval_noreturn_asm_returned =
const_eval_not_enough_caller_args =
calling a function with fewer arguments than it requires
-const_eval_null_box = {$front_matter}: encountered a null box
-const_eval_null_fn_ptr = {$front_matter}: encountered a null function pointer
-const_eval_null_ref = {$front_matter}: encountered a null reference
-const_eval_nullable_ptr_out_of_range = {$front_matter}: encountered a potentially null pointer, but expected something that cannot possibly fail to be {$in_range}
const_eval_nullary_intrinsic_fail =
could not evaluate nullary intrinsic
@@ -257,7 +232,6 @@ const_eval_offset_from_underflow =
const_eval_operator_non_const =
cannot call non-const operator in {const_eval_const_context}s
-const_eval_out_of_range = {$front_matter}: encountered {$value}, but expected something {$in_range}
const_eval_overflow =
overflow executing `{$name}`
@@ -282,12 +256,11 @@ const_eval_pointer_out_of_bounds =
*[many] bytes
} starting at offset {$ptr_offset} is out-of-bounds
const_eval_pointer_use_after_free =
- pointer to {$allocation} was dereferenced after this allocation got freed
+ {$bad_pointer_message}: {$alloc_id} has been freed, so this pointer is dangling
const_eval_ptr_as_bytes_1 =
this code performed an operation that depends on the underlying bytes representing a pointer
const_eval_ptr_as_bytes_2 =
the absolute address of a pointer is not known at compile-time, so such operations are not supported
-const_eval_ptr_out_of_range = {$front_matter}: encountered a pointer, but expected something that cannot possibly fail to be {$in_range}
const_eval_question_branch_non_const =
`?` cannot determine the branch of `{$ty}` in {const_eval_const_context}s
@@ -315,8 +288,8 @@ const_eval_raw_ptr_to_int =
const_eval_read_extern_static =
cannot read from extern static ({$did})
-const_eval_read_pointer_as_bytes =
- unable to turn pointer into raw bytes
+const_eval_read_pointer_as_int =
+ unable to turn pointer into integer
const_eval_realloc_or_alloc_with_offset =
{$kind ->
[dealloc] deallocating
@@ -324,17 +297,12 @@ const_eval_realloc_or_alloc_with_offset =
*[other] {""}
} {$ptr} which does not point to the beginning of an object
-const_eval_ref_to_mut = {$front_matter}: encountered a reference pointing to mutable memory in a constant
-const_eval_ref_to_static = {$front_matter}: encountered a reference pointing to a static variable in a constant
-const_eval_ref_to_uninhabited = {$front_matter}: encountered a reference pointing to uninhabited type {$ty}
const_eval_remainder_by_zero =
calculating the remainder with a divisor of zero
const_eval_remainder_overflow =
overflow in signed remainder (dividing MIN by -1)
const_eval_scalar_size_mismatch =
scalar size mismatch: expected {$target_size} bytes but got {$data_size} bytes instead
-const_eval_size_of_unsized =
- size_of called on unsized type `{$ty}`
const_eval_size_overflow =
overflow computing total size of `{$name}`
@@ -363,8 +331,6 @@ const_eval_transient_mut_borrow_raw = raw mutable references are not allowed in
const_eval_try_block_from_output_non_const =
`try` block cannot convert `{$ty}` to the result in {const_eval_const_context}s
-const_eval_unaligned_box = {$front_matter}: encountered an unaligned box (required {$required_bytes} byte alignment but found {$found_bytes})
-const_eval_unaligned_ref = {$front_matter}: encountered an unaligned reference (required {$required_bytes} byte alignment but found {$found_bytes})
const_eval_unallowed_fn_pointer_call = function pointer calls are not allowed in {const_eval_const_context}s
const_eval_unallowed_heap_allocations =
@@ -399,34 +365,23 @@ const_eval_unallowed_mutable_refs_raw =
const_eval_unallowed_op_in_const_context =
{$msg}
+const_eval_unavailable_target_features_for_fn =
+ calling a function that requires unavailable target features: {$unavailable_feats}
+
const_eval_undefined_behavior =
it is undefined behavior to use this value
const_eval_undefined_behavior_note =
The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
+const_eval_uninhabited_enum_variant_read =
+ read discriminant of an uninhabited enum variant
const_eval_uninhabited_enum_variant_written =
- writing discriminant of an uninhabited enum
-const_eval_uninhabited_val = {$front_matter}: encountered a value of uninhabited type `{$ty}`
-const_eval_uninit = {$front_matter}: encountered uninitialized bytes
-const_eval_uninit_bool = {$front_matter}: encountered uninitialized memory, but expected a boolean
-const_eval_uninit_box = {$front_matter}: encountered uninitialized memory, but expected a box
-const_eval_uninit_char = {$front_matter}: encountered uninitialized memory, but expected a unicode scalar value
-const_eval_uninit_enum_tag = {$front_matter}: encountered uninitialized bytes, but expected a valid enum tag
-const_eval_uninit_float = {$front_matter}: encountered uninitialized memory, but expected a floating point number
-const_eval_uninit_fn_ptr = {$front_matter}: encountered uninitialized memory, but expected a function pointer
-const_eval_uninit_init_scalar = {$front_matter}: encountered uninitialized memory, but expected initialized scalar value
-const_eval_uninit_int = {$front_matter}: encountered uninitialized memory, but expected an integer
-const_eval_uninit_raw_ptr = {$front_matter}: encountered uninitialized memory, but expected a raw pointer
-const_eval_uninit_ref = {$front_matter}: encountered uninitialized memory, but expected a reference
-const_eval_uninit_str = {$front_matter}: encountered uninitialized data in `str`
-const_eval_uninit_unsized_local =
- unsized local is used while uninitialized
+ writing discriminant of an uninhabited enum variant
const_eval_unreachable = entering unreachable code
const_eval_unreachable_unwind =
unwinding past a stack frame that does not allow unwinding
-const_eval_unsafe_cell = {$front_matter}: encountered `UnsafeCell` in a `const`
const_eval_unsigned_offset_from_overflow =
`ptr_offset_from_unsigned` called when first pointer has smaller offset than second: {$a_offset} < {$b_offset}
@@ -449,8 +404,63 @@ const_eval_unwind_past_top =
const_eval_upcast_mismatch =
upcast on a pointer whose vtable does not match its type
+## The `front_matter`s here refer to either `const_eval_validation_front_matter_invalid_value` or `const_eval_validation_front_matter_invalid_value_with_path`.
+## (We'd love to sort this differently to make that more clear but tidy won't let us...)
+const_eval_validation_box_to_mut = {$front_matter}: encountered a box pointing to mutable memory in a constant
+const_eval_validation_box_to_static = {$front_matter}: encountered a box pointing to a static variable in a constant
+const_eval_validation_box_to_uninhabited = {$front_matter}: encountered a box pointing to uninhabited type {$ty}
+const_eval_validation_dangling_box_no_provenance = {$front_matter}: encountered a dangling box ({$pointer} has no provenance)
+const_eval_validation_dangling_box_out_of_bounds = {$front_matter}: encountered a dangling box (going beyond the bounds of its allocation)
+const_eval_validation_dangling_box_use_after_free = {$front_matter}: encountered a dangling box (use-after-free)
+const_eval_validation_dangling_ref_no_provenance = {$front_matter}: encountered a dangling reference ({$pointer} has no provenance)
+const_eval_validation_dangling_ref_out_of_bounds = {$front_matter}: encountered a dangling reference (going beyond the bounds of its allocation)
+const_eval_validation_dangling_ref_use_after_free = {$front_matter}: encountered a dangling reference (use-after-free)
+
+const_eval_validation_expected_bool = expected a boolean
+const_eval_validation_expected_box = expected a box
+const_eval_validation_expected_char = expected a unicode scalar value
+const_eval_validation_expected_enum_tag = expected a valid enum tag
+const_eval_validation_expected_float = expected a floating point number
+const_eval_validation_expected_fn_ptr = expected a function pointer
+const_eval_validation_expected_init_scalar = expected initialized scalar value
+const_eval_validation_expected_int = expected an integer
+const_eval_validation_expected_raw_ptr = expected a raw pointer
+const_eval_validation_expected_ref = expected a reference
+const_eval_validation_expected_str = expected a string
+
+const_eval_validation_front_matter_invalid_value = constructing invalid value
+const_eval_validation_front_matter_invalid_value_with_path = constructing invalid value at {$path}
+
const_eval_validation_invalid_bool = {$front_matter}: encountered {$value}, but expected a boolean
+const_eval_validation_invalid_box_meta = {$front_matter}: encountered invalid box metadata: total size is bigger than largest supported object
+const_eval_validation_invalid_box_slice_meta = {$front_matter}: encountered invalid box metadata: slice is bigger than largest supported object
const_eval_validation_invalid_char = {$front_matter}: encountered {$value}, but expected a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)
+
+const_eval_validation_invalid_enum_tag = {$front_matter}: encountered {$value}, but expected a valid enum tag
+const_eval_validation_invalid_fn_ptr = {$front_matter}: encountered {$value}, but expected a function pointer
+const_eval_validation_invalid_ref_meta = {$front_matter}: encountered invalid reference metadata: total size is bigger than largest supported object
+const_eval_validation_invalid_ref_slice_meta = {$front_matter}: encountered invalid reference metadata: slice is bigger than largest supported object
+const_eval_validation_invalid_vtable_ptr = {$front_matter}: encountered {$value}, but expected a vtable pointer
+const_eval_validation_mutable_ref_in_const = {$front_matter}: encountered mutable reference in a `const`
+const_eval_validation_never_val = {$front_matter}: encountered a value of the never type `!`
+const_eval_validation_null_box = {$front_matter}: encountered a null box
+const_eval_validation_null_fn_ptr = {$front_matter}: encountered a null function pointer
+const_eval_validation_null_ref = {$front_matter}: encountered a null reference
+const_eval_validation_nullable_ptr_out_of_range = {$front_matter}: encountered a potentially null pointer, but expected something that cannot possibly fail to be {$in_range}
+const_eval_validation_out_of_range = {$front_matter}: encountered {$value}, but expected something {$in_range}
+const_eval_validation_partial_pointer = {$front_matter}: encountered a partial pointer or a mix of pointers
+const_eval_validation_pointer_as_int = {$front_matter}: encountered a pointer, but {$expected}
+const_eval_validation_ptr_out_of_range = {$front_matter}: encountered a pointer, but expected something that cannot possibly fail to be {$in_range}
+const_eval_validation_ref_to_mut = {$front_matter}: encountered a reference pointing to mutable memory in a constant
+const_eval_validation_ref_to_static = {$front_matter}: encountered a reference pointing to a static variable in a constant
+const_eval_validation_ref_to_uninhabited = {$front_matter}: encountered a reference pointing to uninhabited type {$ty}
+const_eval_validation_unaligned_box = {$front_matter}: encountered an unaligned box (required {$required_bytes} byte alignment but found {$found_bytes})
+const_eval_validation_unaligned_ref = {$front_matter}: encountered an unaligned reference (required {$required_bytes} byte alignment but found {$found_bytes})
+const_eval_validation_uninhabited_enum_variant = {$front_matter}: encountered an uninhabited enum variant
+const_eval_validation_uninhabited_val = {$front_matter}: encountered a value of uninhabited type `{$ty}`
+const_eval_validation_uninit = {$front_matter}: encountered uninitialized memory, but {$expected}
+const_eval_validation_unsafe_cell = {$front_matter}: encountered `UnsafeCell` in a `const`
+
const_eval_write_to_read_only =
writing to {$allocation} which is read-only
const_eval_zst_pointer_out_of_bounds =
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index 7890d878d..d39a7e8a1 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -138,7 +138,10 @@ where
err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
ErrorHandled::TooGeneric
}
- err_inval!(AlreadyReported(error_reported)) => ErrorHandled::Reported(error_reported),
+ err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar),
+ err_inval!(Layout(LayoutError::ReferencesError(guar))) => {
+ ErrorHandled::Reported(guar.into())
+ }
err_inval!(Layout(layout_error @ LayoutError::SizeOverflow(_))) => {
// We must *always* hard error on these, even if the caller wants just a lint.
// The `message` makes little sense here, this is a more serious error than the
@@ -150,8 +153,8 @@ where
tcx.sess.create_err(Spanned { span, node: layout_error.into_diagnostic() });
err.code(rustc_errors::error_code!(E0080));
let Some((mut err, handler)) = err.into_diagnostic() else {
- panic!("did not emit diag");
- };
+ panic!("did not emit diag");
+ };
for frame in frames {
err.eager_subdiagnostic(handler, frame);
}
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 417ab78fd..4c7e91944 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -45,20 +45,20 @@ fn eval_body_using_ecx<'mir, 'tcx>(
"Unexpected DefKind: {:?}",
ecx.tcx.def_kind(cid.instance.def_id())
);
- let layout = ecx.layout_of(body.bound_return_ty().subst(tcx, cid.instance.substs))?;
+ let layout = ecx.layout_of(body.bound_return_ty().instantiate(tcx, cid.instance.args))?;
assert!(layout.is_sized());
let ret = ecx.allocate(layout, MemoryKind::Stack)?;
trace!(
"eval_body_using_ecx: pushing stack frame for global: {}{}",
with_no_trimmed_paths!(ecx.tcx.def_path_str(cid.instance.def_id())),
- cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p))
+ cid.promoted.map_or_else(String::new, |p| format!("::promoted[{p:?}]"))
);
ecx.push_stack_frame(
cid.instance,
body,
- &ret.into(),
+ &ret.clone().into(),
StackPopCleanup::Root { cleanup: false },
)?;
@@ -228,7 +228,6 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
tcx: TyCtxt<'tcx>,
key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
- assert!(key.param_env.is_const());
// see comment in eval_to_allocation_raw_provider for what we're doing here
if key.param_env.reveal() == Reveal::All {
let mut key = key;
@@ -245,10 +244,10 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
// Catch such calls and evaluate them instead of trying to load a constant's MIR.
if let ty::InstanceDef::Intrinsic(def_id) = key.value.instance.def {
let ty = key.value.instance.ty(tcx, key.param_env);
- let ty::FnDef(_, substs) = ty.kind() else {
+ let ty::FnDef(_, args) = ty.kind() else {
bug!("intrinsic with type {:?}", ty);
};
- return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
+ return eval_nullary_intrinsic(tcx, key.param_env, def_id, args).map_err(|error| {
let span = tcx.def_span(def_id);
super::report(
@@ -269,7 +268,6 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
tcx: TyCtxt<'tcx>,
key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
- assert!(key.param_env.is_const());
// Because the constant is computed twice (once per value of `Reveal`), we are at risk of
// reporting the same error twice here. To resolve this, we check whether we can evaluate the
// constant in the more restrictive `Reveal::UserFacing`, which most likely already was
@@ -328,10 +326,10 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
("static", String::new())
} else {
// If the current item has generics, we'd like to enrich the message with the
- // instance and its substs: to show the actual compile-time values, in addition to
+ // instance and its args: to show the actual compile-time values, in addition to
// the expression, leading to the const eval error.
let instance = &key.value.instance;
- if !instance.substs.is_empty() {
+ if !instance.args.is_empty() {
let instance = with_no_trimmed_paths!(instance.to_string());
("const_with_path", instance)
} else {
@@ -356,7 +354,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
// Since evaluation had no errors, validate the resulting constant.
// This is a separate `try` block to provide more targeted error reporting.
let validation: Result<_, InterpErrorInfo<'_>> = try {
- let mut ref_tracking = RefTracking::new(mplace);
+ let mut ref_tracking = RefTracking::new(mplace.clone());
let mut inner = false;
while let Some((mplace, path)) = ref_tracking.todo.pop() {
let mode = match tcx.static_mutability(cid.instance.def_id()) {
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index fa8253d5e..4ee4ebbb9 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -28,16 +28,19 @@ pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
&& tcx.constness(parent_id) == hir::Constness::Const
}
-/// Checks whether an item is considered to be `const`. If it is a constructor, it is const. If
-/// it is a trait impl/function, return if it has a `const` modifier. If it is an intrinsic,
-/// report whether said intrinsic has a `rustc_const_{un,}stable` attribute. Otherwise, return
-/// `Constness::NotConst`.
+/// Checks whether an item is considered to be `const`. If it is a constructor, anonymous const,
+/// const block, const item or associated const, it is const. If it is a trait impl/function,
+/// return if it has a `const` modifier. If it is an intrinsic, report whether said intrinsic
+/// has a `rustc_const_{un,}stable` attribute. Otherwise, return `Constness::NotConst`.
fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
let node = tcx.hir().get_by_def_id(def_id);
match node {
- hir::Node::Ctor(_) => hir::Constness::Const,
- hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(impl_), .. }) => impl_.constness,
+ hir::Node::Ctor(_)
+ | hir::Node::AnonConst(_)
+ | hir::Node::ConstBlock(_)
+ | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. }) => hir::Constness::Const,
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(_), .. }) => tcx.generics_of(def_id).host_effect_index.map_or(hir::Constness::NotConst, |_| hir::Constness::Const),
hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
// Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
// foreign items cannot be evaluated at compile-time.
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index f9f645af4..b740b79d1 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -22,7 +22,7 @@ use rustc_target::spec::abi::Abi as CallAbi;
use crate::errors::{LongRunning, LongRunningWarn};
use crate::interpret::{
- self, compile_time_machine, AllocId, ConstAllocation, FnVal, Frame, ImmTy, InterpCx,
+ self, compile_time_machine, AllocId, ConstAllocation, FnArg, FnVal, Frame, ImmTy, InterpCx,
InterpResult, OpTy, PlaceTy, Pointer, Scalar,
};
use crate::{errors, fluent_generated as fluent};
@@ -201,7 +201,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
fn hook_special_const_fn(
&mut self,
instance: ty::Instance<'tcx>,
- args: &[OpTy<'tcx>],
+ args: &[FnArg<'tcx>],
dest: &PlaceTy<'tcx>,
ret: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
@@ -210,12 +210,13 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
if Some(def_id) == self.tcx.lang_items().panic_display()
|| Some(def_id) == self.tcx.lang_items().begin_panic_fn()
{
+ let args = self.copy_fn_args(args)?;
// &str or &&str
assert!(args.len() == 1);
- let mut msg_place = self.deref_operand(&args[0])?;
+ let mut msg_place = self.deref_pointer(&args[0])?;
while msg_place.layout.ty.is_ref() {
- msg_place = self.deref_operand(&msg_place.into())?;
+ msg_place = self.deref_pointer(&msg_place)?;
}
let msg = Symbol::intern(self.read_str(&msg_place)?);
@@ -229,15 +230,16 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
*self.tcx,
ty::ParamEnv::reveal_all(),
const_def_id,
- instance.substs,
+ instance.args,
)
.unwrap()
.unwrap();
return Ok(Some(new_instance));
} else if Some(def_id) == self.tcx.lang_items().align_offset_fn() {
+ let args = self.copy_fn_args(args)?;
// For align_offset, we replace the function call if the pointer has no address.
- match self.align_offset(instance, args, dest, ret)? {
+ match self.align_offset(instance, &args, dest, ret)? {
ControlFlow::Continue(()) => return Ok(Some(instance)),
ControlFlow::Break(()) => return Ok(None),
}
@@ -293,7 +295,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
self.eval_fn_call(
FnVal::Instance(instance),
(CallAbi::Rust, fn_abi),
- &[addr, align],
+ &[FnArg::Copy(addr), FnArg::Copy(align)],
/* with_caller_location = */ false,
dest,
ret,
@@ -425,52 +427,41 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
fn find_mir_or_eval_fn(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
- instance: ty::Instance<'tcx>,
+ orig_instance: ty::Instance<'tcx>,
_abi: CallAbi,
- args: &[OpTy<'tcx>],
+ args: &[FnArg<'tcx>],
dest: &PlaceTy<'tcx>,
ret: Option<mir::BasicBlock>,
_unwind: mir::UnwindAction, // unwinding is not supported in consts
) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
- debug!("find_mir_or_eval_fn: {:?}", instance);
+ debug!("find_mir_or_eval_fn: {:?}", orig_instance);
+
+ // Replace some functions.
+ let Some(instance) = ecx.hook_special_const_fn(orig_instance, args, dest, ret)? else {
+ // Call has already been handled.
+ return Ok(None);
+ };
// Only check non-glue functions
if let ty::InstanceDef::Item(def) = instance.def {
// Execution might have wandered off into other crates, so we cannot do a stability-
- // sensitive check here. But we can at least rule out functions that are not const
- // at all.
- if !ecx.tcx.is_const_fn_raw(def) {
- // allow calling functions inside a trait marked with #[const_trait].
- if !ecx.tcx.is_const_default_method(def) {
- // We certainly do *not* want to actually call the fn
- // though, so be sure we return here.
- throw_unsup_format!("calling non-const function `{}`", instance)
- }
- }
-
- let Some(new_instance) = ecx.hook_special_const_fn(instance, args, dest, ret)? else {
- return Ok(None);
- };
-
- if new_instance != instance {
- // We call another const fn instead.
- // However, we return the *original* instance to make backtraces work out
- // (and we hope this does not confuse the FnAbi checks too much).
- return Ok(Self::find_mir_or_eval_fn(
- ecx,
- new_instance,
- _abi,
- args,
- dest,
- ret,
- _unwind,
- )?
- .map(|(body, _instance)| (body, instance)));
+ // sensitive check here. But we can at least rule out functions that are not const at
+ // all. That said, we have to allow calling functions inside a trait marked with
+ // #[const_trait]. These *are* const-checked!
+ // FIXME: why does `is_const_fn_raw` not classify them as const?
+ if (!ecx.tcx.is_const_fn_raw(def) && !ecx.tcx.is_const_default_method(def))
+ || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check)
+ {
+ // We certainly do *not* want to actually call the fn
+ // though, so be sure we return here.
+ throw_unsup_format!("calling non-const function `{}`", instance)
}
}
// This is a const fn. Call it.
- Ok(Some((ecx.load_mir(instance.def, None)?, instance)))
+ // In case of replacement, we return the *original* instance to make backtraces work out
+ // (and we hope this does not confuse the FnAbi checks too much).
+ Ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
}
fn call_intrinsic(
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index a3064b53d..854104622 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -85,7 +85,7 @@ pub(crate) fn eval_to_valtree<'tcx>(
}
#[instrument(skip(tcx), level = "debug")]
-pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
+pub fn try_destructure_mir_constant_for_diagnostics<'tcx>(
tcx: TyCtxt<'tcx>,
val: ConstValue<'tcx>,
ty: Ty<'tcx>,
@@ -101,17 +101,17 @@ pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
return None;
}
ty::Adt(def, _) => {
- let variant = ecx.read_discriminant(&op).ok()?.1;
- let down = ecx.operand_downcast(&op, variant).ok()?;
+ let variant = ecx.read_discriminant(&op).ok()?;
+ let down = ecx.project_downcast(&op, variant).ok()?;
(def.variants()[variant].fields.len(), Some(variant), down)
}
- ty::Tuple(substs) => (substs.len(), None, op),
+ ty::Tuple(args) => (args.len(), None, op),
_ => bug!("cannot destructure mir constant {:?}", val),
};
let fields_iter = (0..field_count)
.map(|i| {
- let field_op = ecx.operand_field(&down, i).ok()?;
+ let field_op = ecx.project_field(&down, i).ok()?;
let val = op_to_const(&ecx, &field_op);
Some((val, field_op.layout.ty))
})
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index e574df276..b15a65d67 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -2,14 +2,15 @@ use super::eval_queries::{mk_eval_cx, op_to_const};
use super::machine::CompileTimeEvalContext;
use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
use crate::const_eval::CanAccessStatics;
+use crate::interpret::MPlaceTy;
use crate::interpret::{
intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
- MemoryKind, PlaceTy, Scalar,
+ MemoryKind, Place, Projectable, Scalar,
};
-use crate::interpret::{MPlaceTy, Value};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_span::source_map::DUMMY_SP;
-use rustc_target::abi::{Align, FieldIdx, VariantIdx, FIRST_VARIANT};
+use rustc_target::abi::VariantIdx;
#[instrument(skip(ecx), level = "debug")]
fn branches<'tcx>(
@@ -20,15 +21,15 @@ fn branches<'tcx>(
num_nodes: &mut usize,
) -> ValTreeCreationResult<'tcx> {
let place = match variant {
- Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
- None => *place,
+ Some(variant) => ecx.project_downcast(place, variant).unwrap(),
+ None => place.clone(),
};
let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
debug!(?place, ?variant);
let mut fields = Vec::with_capacity(n);
for i in 0..n {
- let field = ecx.mplace_field(&place, i).unwrap();
+ let field = ecx.project_field(&place, i).unwrap();
let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
fields.push(Some(valtree));
}
@@ -55,13 +56,11 @@ fn slice_branches<'tcx>(
place: &MPlaceTy<'tcx>,
num_nodes: &mut usize,
) -> ValTreeCreationResult<'tcx> {
- let n = place
- .len(&ecx.tcx.tcx)
- .unwrap_or_else(|_| panic!("expected to use len of place {:?}", place));
+ let n = place.len(ecx).unwrap_or_else(|_| panic!("expected to use len of place {place:?}"));
let mut elems = Vec::with_capacity(n as usize);
for i in 0..n {
- let place_elem = ecx.mplace_index(place, i).unwrap();
+ let place_elem = ecx.project_index(place, i).unwrap();
let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
elems.push(valtree);
}
@@ -88,7 +87,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
Ok(ty::ValTree::zst())
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
- let Ok(val) = ecx.read_immediate(&place.into()) else {
+ let Ok(val) = ecx.read_immediate(place) else {
return Err(ValTreeCreationError::Other);
};
let val = val.to_scalar();
@@ -104,7 +103,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),
ty::Ref(_, _, _) => {
- let Ok(derefd_place)= ecx.deref_operand(&place.into()) else {
+ let Ok(derefd_place)= ecx.deref_pointer(place) else {
return Err(ValTreeCreationError::Other);
};
debug!(?derefd_place);
@@ -132,7 +131,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
bug!("uninhabited types should have errored and never gotten converted to valtree")
}
- let Ok((_, variant)) = ecx.read_discriminant(&place.into()) else {
+ let Ok(variant) = ecx.read_discriminant(place) else {
return Err(ValTreeCreationError::Other);
};
branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
@@ -156,52 +155,37 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
}
}
-#[instrument(skip(ecx), level = "debug")]
-fn create_mplace_from_layout<'tcx>(
- ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
- ty: Ty<'tcx>,
-) -> MPlaceTy<'tcx> {
- let tcx = ecx.tcx;
- let param_env = ecx.param_env;
- let layout = tcx.layout_of(param_env.and(ty)).unwrap();
- debug!(?layout);
-
- ecx.allocate(layout, MemoryKind::Stack).unwrap()
-}
-
-// Walks custom DSTs and gets the type of the unsized field and the number of elements
-// in the unsized field.
-fn get_info_on_unsized_field<'tcx>(
- ty: Ty<'tcx>,
+/// Valtrees don't store the `MemPlaceMeta` that all dynamically sized values have in the interpreter.
+/// This function reconstructs it.
+fn reconstruct_place_meta<'tcx>(
+ layout: TyAndLayout<'tcx>,
valtree: ty::ValTree<'tcx>,
tcx: TyCtxt<'tcx>,
-) -> (Ty<'tcx>, usize) {
+) -> MemPlaceMeta {
+ if layout.is_sized() {
+ return MemPlaceMeta::None;
+ }
+
let mut last_valtree = valtree;
+ // Traverse the type, and update `last_valtree` as we go.
let tail = tcx.struct_tail_with_normalize(
- ty,
+ layout.ty,
|ty| ty,
|| {
let branches = last_valtree.unwrap_branch();
- last_valtree = branches[branches.len() - 1];
+ last_valtree = *branches.last().unwrap();
debug!(?branches, ?last_valtree);
},
);
- let unsized_inner_ty = match tail.kind() {
- ty::Slice(t) => *t,
- ty::Str => tail,
- _ => bug!("expected Slice or Str"),
- };
-
- // Have to adjust type for ty::Str
- let unsized_inner_ty = match unsized_inner_ty.kind() {
- ty::Str => tcx.types.u8,
- _ => unsized_inner_ty,
+ // Sanity-check that we got a tail we support.
+ match tail.kind() {
+ ty::Slice(..) | ty::Str => {}
+ _ => bug!("unsized tail of a valtree must be Slice or Str"),
};
- // Get the number of elements in the unsized field
+ // Get the number of elements in the unsized field.
let num_elems = last_valtree.unwrap_branch().len();
-
- (unsized_inner_ty, num_elems)
+ MemPlaceMeta::Meta(Scalar::from_target_usize(num_elems as u64, &tcx))
}
#[instrument(skip(ecx), level = "debug", ret)]
@@ -210,41 +194,9 @@ fn create_pointee_place<'tcx>(
ty: Ty<'tcx>,
valtree: ty::ValTree<'tcx>,
) -> MPlaceTy<'tcx> {
- let tcx = ecx.tcx.tcx;
-
- if !ty.is_sized(*ecx.tcx, ty::ParamEnv::empty()) {
- // We need to create `Allocation`s for custom DSTs
-
- let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx);
- let unsized_inner_ty = match unsized_inner_ty.kind() {
- ty::Str => tcx.types.u8,
- _ => unsized_inner_ty,
- };
- let unsized_inner_ty_size =
- tcx.layout_of(ty::ParamEnv::empty().and(unsized_inner_ty)).unwrap().layout.size();
- debug!(?unsized_inner_ty, ?unsized_inner_ty_size, ?num_elems);
-
- // for custom DSTs only the last field/element is unsized, but we need to also allocate
- // space for the other fields/elements
- let layout = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap();
- let size_of_sized_part = layout.layout.size();
-
- // Get the size of the memory behind the DST
- let dst_size = unsized_inner_ty_size.checked_mul(num_elems as u64, &tcx).unwrap();
-
- let size = size_of_sized_part.checked_add(dst_size, &tcx).unwrap();
- let align = Align::from_bytes(size.bytes().next_power_of_two()).unwrap();
- let ptr = ecx.allocate_ptr(size, align, MemoryKind::Stack).unwrap();
- debug!(?ptr);
-
- MPlaceTy::from_aligned_ptr_with_meta(
- ptr.into(),
- layout,
- MemPlaceMeta::Meta(Scalar::from_target_usize(num_elems as u64, &tcx)),
- )
- } else {
- create_mplace_from_layout(ecx, ty)
- }
+ let layout = ecx.layout_of(ty).unwrap();
+ let meta = reconstruct_place_meta(layout, valtree, ecx.tcx.tcx);
+ ecx.allocate_dyn(layout, MemoryKind::Stack, meta).unwrap()
}
/// Converts a `ValTree` to a `ConstValue`, which is needed after mir
@@ -282,17 +234,20 @@ pub fn valtree_to_const_value<'tcx>(
),
},
ty::Ref(_, _, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
- let mut place = match ty.kind() {
+ let place = match ty.kind() {
ty::Ref(_, inner_ty, _) => {
- // Need to create a place for the pointee to fill for Refs
+ // Need to create a place for the pointee (the reference itself will be an immediate)
create_pointee_place(&mut ecx, *inner_ty, valtree)
}
- _ => create_mplace_from_layout(&mut ecx, ty),
+ _ => {
+ // Need to create a place for this valtree.
+ create_pointee_place(&mut ecx, ty, valtree)
+ }
};
debug!(?place);
- valtree_into_mplace(&mut ecx, &mut place, valtree);
- dump_place(&ecx, place.into());
+ valtree_into_mplace(&mut ecx, &place, valtree);
+ dump_place(&ecx, &place);
intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
match ty.kind() {
@@ -331,7 +286,7 @@ pub fn valtree_to_const_value<'tcx>(
#[instrument(skip(ecx), level = "debug")]
fn valtree_into_mplace<'tcx>(
ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
- place: &mut MPlaceTy<'tcx>,
+ place: &MPlaceTy<'tcx>,
valtree: ty::ValTree<'tcx>,
) {
// This will match on valtree and write the value(s) corresponding to the ValTree
@@ -347,14 +302,14 @@ fn valtree_into_mplace<'tcx>(
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
let scalar_int = valtree.unwrap_leaf();
debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
- ecx.write_immediate(Immediate::Scalar(scalar_int.into()), &place.into()).unwrap();
+ ecx.write_immediate(Immediate::Scalar(scalar_int.into()), place).unwrap();
}
ty::Ref(_, inner_ty, _) => {
- let mut pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
+ let pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
debug!(?pointee_place);
- valtree_into_mplace(ecx, &mut pointee_place, valtree);
- dump_place(ecx, pointee_place.into());
+ valtree_into_mplace(ecx, &pointee_place, valtree);
+ dump_place(ecx, &pointee_place);
intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
let imm = match inner_ty.kind() {
@@ -371,7 +326,7 @@ fn valtree_into_mplace<'tcx>(
};
debug!(?imm);
- ecx.write_immediate(imm, &place.into()).unwrap();
+ ecx.write_immediate(imm, place).unwrap();
}
ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
let branches = valtree.unwrap_branch();
@@ -386,12 +341,12 @@ fn valtree_into_mplace<'tcx>(
debug!(?variant);
(
- place.project_downcast(ecx, variant_idx).unwrap(),
+ ecx.project_downcast(place, variant_idx).unwrap(),
&branches[1..],
Some(variant_idx),
)
}
- _ => (*place, branches, None),
+ _ => (place.clone(), branches, None),
};
debug!(?place_adjusted, ?branches);
@@ -400,70 +355,33 @@ fn valtree_into_mplace<'tcx>(
for (i, inner_valtree) in branches.iter().enumerate() {
debug!(?i, ?inner_valtree);
- let mut place_inner = match ty.kind() {
- ty::Str | ty::Slice(_) => ecx.mplace_index(&place, i as u64).unwrap(),
- _ if !ty.is_sized(*ecx.tcx, ty::ParamEnv::empty())
- && i == branches.len() - 1 =>
- {
- // Note: For custom DSTs we need to manually process the last unsized field.
- // We created a `Pointer` for the `Allocation` of the complete sized version of
- // the Adt in `create_pointee_place` and now we fill that `Allocation` with the
- // values in the ValTree. For the unsized field we have to additionally add the meta
- // data.
-
- let (unsized_inner_ty, num_elems) =
- get_info_on_unsized_field(ty, valtree, tcx);
- debug!(?unsized_inner_ty);
-
- let inner_ty = match ty.kind() {
- ty::Adt(def, substs) => {
- let i = FieldIdx::from_usize(i);
- def.variant(FIRST_VARIANT).fields[i].ty(tcx, substs)
- }
- ty::Tuple(inner_tys) => inner_tys[i],
- _ => bug!("unexpected unsized type {:?}", ty),
- };
-
- let inner_layout =
- tcx.layout_of(ty::ParamEnv::empty().and(inner_ty)).unwrap();
- debug!(?inner_layout);
-
- let offset = place_adjusted.layout.fields.offset(i);
- place
- .offset_with_meta(
- offset,
- MemPlaceMeta::Meta(Scalar::from_target_usize(
- num_elems as u64,
- &tcx,
- )),
- inner_layout,
- &tcx,
- )
- .unwrap()
+ let place_inner = match ty.kind() {
+ ty::Str | ty::Slice(_) | ty::Array(..) => {
+ ecx.project_index(place, i as u64).unwrap()
}
- _ => ecx.mplace_field(&place_adjusted, i).unwrap(),
+ _ => ecx.project_field(&place_adjusted, i).unwrap(),
};
debug!(?place_inner);
- valtree_into_mplace(ecx, &mut place_inner, *inner_valtree);
- dump_place(&ecx, place_inner.into());
+ valtree_into_mplace(ecx, &place_inner, *inner_valtree);
+ dump_place(&ecx, &place_inner);
}
debug!("dump of place_adjusted:");
- dump_place(ecx, place_adjusted.into());
+ dump_place(ecx, &place_adjusted);
if let Some(variant_idx) = variant_idx {
// don't forget filling the place with the discriminant of the enum
- ecx.write_discriminant(variant_idx, &place.into()).unwrap();
+ ecx.write_discriminant(variant_idx, place).unwrap();
}
debug!("dump of place after writing discriminant:");
- dump_place(ecx, place.into());
+ dump_place(ecx, place);
}
_ => bug!("shouldn't have created a ValTree for {:?}", ty),
}
}
-fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: PlaceTy<'tcx>) {
- trace!("{:?}", ecx.dump_place(*place));
+fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: &MPlaceTy<'tcx>) {
+ trace!("{:?}", ecx.dump_place(Place::Ptr(**place)));
}
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index ca38cce71..4362cae7e 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -492,7 +492,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
InvalidMeta(InvalidMetaKind::SliceTooBig) => const_eval_invalid_meta_slice,
InvalidMeta(InvalidMetaKind::TooBig) => const_eval_invalid_meta,
UnterminatedCString(_) => const_eval_unterminated_c_string,
- PointerUseAfterFree(_) => const_eval_pointer_use_after_free,
+ PointerUseAfterFree(_, _) => const_eval_pointer_use_after_free,
PointerOutOfBounds { ptr_size: Size::ZERO, .. } => const_eval_zst_pointer_out_of_bounds,
PointerOutOfBounds { .. } => const_eval_pointer_out_of_bounds,
DanglingIntPointer(0, _) => const_eval_dangling_null_pointer,
@@ -511,8 +511,9 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
InvalidUninitBytes(Some(_)) => const_eval_invalid_uninit_bytes,
DeadLocal => const_eval_dead_local,
ScalarSizeMismatch(_) => const_eval_scalar_size_mismatch,
- UninhabitedEnumVariantWritten => const_eval_uninhabited_enum_variant_written,
- Validation(e) => e.diagnostic_message(),
+ UninhabitedEnumVariantWritten(_) => const_eval_uninhabited_enum_variant_written,
+ UninhabitedEnumVariantRead(_) => const_eval_uninhabited_enum_variant_read,
+ ValidationError(e) => e.diagnostic_message(),
Custom(x) => (x.msg)(),
}
}
@@ -535,7 +536,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
| InvalidMeta(InvalidMetaKind::TooBig)
| InvalidUninitBytes(None)
| DeadLocal
- | UninhabitedEnumVariantWritten => {}
+ | UninhabitedEnumVariantWritten(_)
+ | UninhabitedEnumVariantRead(_) => {}
BoundsCheckFailed { len, index } => {
builder.set_arg("len", len);
builder.set_arg("index", index);
@@ -543,8 +545,10 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
UnterminatedCString(ptr) | InvalidFunctionPointer(ptr) | InvalidVTablePointer(ptr) => {
builder.set_arg("pointer", ptr);
}
- PointerUseAfterFree(allocation) => {
- builder.set_arg("allocation", allocation);
+ PointerUseAfterFree(alloc_id, msg) => {
+ builder
+ .set_arg("alloc_id", alloc_id)
+ .set_arg("bad_pointer_message", bad_pointer_message(msg, handler));
}
PointerOutOfBounds { alloc_id, alloc_size, ptr_offset, ptr_size, msg } => {
builder
@@ -583,13 +587,13 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
InvalidUninitBytes(Some((alloc, info))) => {
builder.set_arg("alloc", alloc);
builder.set_arg("access", info.access);
- builder.set_arg("uninit", info.uninit);
+ builder.set_arg("uninit", info.bad);
}
ScalarSizeMismatch(info) => {
builder.set_arg("target_size", info.target_size);
builder.set_arg("data_size", info.data_size);
}
- Validation(e) => e.add_args(handler, builder),
+ ValidationError(e) => e.add_args(handler, builder),
Custom(custom) => {
(custom.add_args)(&mut |name, value| {
builder.set_arg(name, value);
@@ -604,73 +608,72 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
use crate::fluent_generated::*;
use rustc_middle::mir::interpret::ValidationErrorKind::*;
match self.kind {
- PtrToUninhabited { ptr_kind: PointerKind::Box, .. } => const_eval_box_to_uninhabited,
- PtrToUninhabited { ptr_kind: PointerKind::Ref, .. } => const_eval_ref_to_uninhabited,
-
- PtrToStatic { ptr_kind: PointerKind::Box } => const_eval_box_to_static,
- PtrToStatic { ptr_kind: PointerKind::Ref } => const_eval_ref_to_static,
-
- PtrToMut { ptr_kind: PointerKind::Box } => const_eval_box_to_mut,
- PtrToMut { ptr_kind: PointerKind::Ref } => const_eval_ref_to_mut,
-
- ExpectedNonPtr { .. } => const_eval_expected_non_ptr,
- MutableRefInConst => const_eval_mutable_ref_in_const,
- NullFnPtr => const_eval_null_fn_ptr,
- NeverVal => const_eval_never_val,
- NullablePtrOutOfRange { .. } => const_eval_nullable_ptr_out_of_range,
- PtrOutOfRange { .. } => const_eval_ptr_out_of_range,
- OutOfRange { .. } => const_eval_out_of_range,
- UnsafeCell => const_eval_unsafe_cell,
- UninhabitedVal { .. } => const_eval_uninhabited_val,
- InvalidEnumTag { .. } => const_eval_invalid_enum_tag,
- UninitEnumTag => const_eval_uninit_enum_tag,
- UninitStr => const_eval_uninit_str,
- Uninit { expected: ExpectedKind::Bool } => const_eval_uninit_bool,
- Uninit { expected: ExpectedKind::Reference } => const_eval_uninit_ref,
- Uninit { expected: ExpectedKind::Box } => const_eval_uninit_box,
- Uninit { expected: ExpectedKind::RawPtr } => const_eval_uninit_raw_ptr,
- Uninit { expected: ExpectedKind::InitScalar } => const_eval_uninit_init_scalar,
- Uninit { expected: ExpectedKind::Char } => const_eval_uninit_char,
- Uninit { expected: ExpectedKind::Float } => const_eval_uninit_float,
- Uninit { expected: ExpectedKind::Int } => const_eval_uninit_int,
- Uninit { expected: ExpectedKind::FnPtr } => const_eval_uninit_fn_ptr,
- UninitVal => const_eval_uninit,
- InvalidVTablePtr { .. } => const_eval_invalid_vtable_ptr,
+ PtrToUninhabited { ptr_kind: PointerKind::Box, .. } => {
+ const_eval_validation_box_to_uninhabited
+ }
+ PtrToUninhabited { ptr_kind: PointerKind::Ref, .. } => {
+ const_eval_validation_ref_to_uninhabited
+ }
+
+ PtrToStatic { ptr_kind: PointerKind::Box } => const_eval_validation_box_to_static,
+ PtrToStatic { ptr_kind: PointerKind::Ref } => const_eval_validation_ref_to_static,
+
+ PtrToMut { ptr_kind: PointerKind::Box } => const_eval_validation_box_to_mut,
+ PtrToMut { ptr_kind: PointerKind::Ref } => const_eval_validation_ref_to_mut,
+
+ PointerAsInt { .. } => const_eval_validation_pointer_as_int,
+ PartialPointer => const_eval_validation_partial_pointer,
+ MutableRefInConst => const_eval_validation_mutable_ref_in_const,
+ NullFnPtr => const_eval_validation_null_fn_ptr,
+ NeverVal => const_eval_validation_never_val,
+ NullablePtrOutOfRange { .. } => const_eval_validation_nullable_ptr_out_of_range,
+ PtrOutOfRange { .. } => const_eval_validation_ptr_out_of_range,
+ OutOfRange { .. } => const_eval_validation_out_of_range,
+ UnsafeCell => const_eval_validation_unsafe_cell,
+ UninhabitedVal { .. } => const_eval_validation_uninhabited_val,
+ InvalidEnumTag { .. } => const_eval_validation_invalid_enum_tag,
+ UninhabitedEnumVariant => const_eval_validation_uninhabited_enum_variant,
+ Uninit { .. } => const_eval_validation_uninit,
+ InvalidVTablePtr { .. } => const_eval_validation_invalid_vtable_ptr,
InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Box } => {
- const_eval_invalid_box_slice_meta
+ const_eval_validation_invalid_box_slice_meta
}
InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Ref } => {
- const_eval_invalid_ref_slice_meta
+ const_eval_validation_invalid_ref_slice_meta
}
- InvalidMetaTooLarge { ptr_kind: PointerKind::Box } => const_eval_invalid_box_meta,
- InvalidMetaTooLarge { ptr_kind: PointerKind::Ref } => const_eval_invalid_ref_meta,
- UnalignedPtr { ptr_kind: PointerKind::Ref, .. } => const_eval_unaligned_ref,
- UnalignedPtr { ptr_kind: PointerKind::Box, .. } => const_eval_unaligned_box,
+ InvalidMetaTooLarge { ptr_kind: PointerKind::Box } => {
+ const_eval_validation_invalid_box_meta
+ }
+ InvalidMetaTooLarge { ptr_kind: PointerKind::Ref } => {
+ const_eval_validation_invalid_ref_meta
+ }
+ UnalignedPtr { ptr_kind: PointerKind::Ref, .. } => const_eval_validation_unaligned_ref,
+ UnalignedPtr { ptr_kind: PointerKind::Box, .. } => const_eval_validation_unaligned_box,
- NullPtr { ptr_kind: PointerKind::Box } => const_eval_null_box,
- NullPtr { ptr_kind: PointerKind::Ref } => const_eval_null_ref,
+ NullPtr { ptr_kind: PointerKind::Box } => const_eval_validation_null_box,
+ NullPtr { ptr_kind: PointerKind::Ref } => const_eval_validation_null_ref,
DanglingPtrNoProvenance { ptr_kind: PointerKind::Box, .. } => {
- const_eval_dangling_box_no_provenance
+ const_eval_validation_dangling_box_no_provenance
}
DanglingPtrNoProvenance { ptr_kind: PointerKind::Ref, .. } => {
- const_eval_dangling_ref_no_provenance
+ const_eval_validation_dangling_ref_no_provenance
}
DanglingPtrOutOfBounds { ptr_kind: PointerKind::Box } => {
- const_eval_dangling_box_out_of_bounds
+ const_eval_validation_dangling_box_out_of_bounds
}
DanglingPtrOutOfBounds { ptr_kind: PointerKind::Ref } => {
- const_eval_dangling_ref_out_of_bounds
+ const_eval_validation_dangling_ref_out_of_bounds
}
DanglingPtrUseAfterFree { ptr_kind: PointerKind::Box } => {
- const_eval_dangling_box_use_after_free
+ const_eval_validation_dangling_box_use_after_free
}
DanglingPtrUseAfterFree { ptr_kind: PointerKind::Ref } => {
- const_eval_dangling_ref_use_after_free
+ const_eval_validation_dangling_ref_use_after_free
}
InvalidBool { .. } => const_eval_validation_invalid_bool,
InvalidChar { .. } => const_eval_validation_invalid_char,
- InvalidFnPtr { .. } => const_eval_invalid_fn_ptr,
+ InvalidFnPtr { .. } => const_eval_validation_invalid_fn_ptr,
}
}
@@ -678,13 +681,21 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
use crate::fluent_generated as fluent;
use rustc_middle::mir::interpret::ValidationErrorKind::*;
+ if let PointerAsInt { .. } | PartialPointer = self.kind {
+ err.help(fluent::const_eval_ptr_as_bytes_1);
+ err.help(fluent::const_eval_ptr_as_bytes_2);
+ }
+
let message = if let Some(path) = self.path {
handler.eagerly_translate_to_string(
- fluent::const_eval_invalid_value_with_path,
+ fluent::const_eval_validation_front_matter_invalid_value_with_path,
[("path".into(), DiagnosticArgValue::Str(path.into()))].iter().map(|(a, b)| (a, b)),
)
} else {
- handler.eagerly_translate_to_string(fluent::const_eval_invalid_value, [].into_iter())
+ handler.eagerly_translate_to_string(
+ fluent::const_eval_validation_front_matter_invalid_value,
+ [].into_iter(),
+ )
};
err.set_arg("front_matter", message);
@@ -724,8 +735,24 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
PtrToUninhabited { ty, .. } | UninhabitedVal { ty } => {
err.set_arg("ty", ty);
}
- ExpectedNonPtr { value }
- | InvalidEnumTag { value }
+ PointerAsInt { expected } | Uninit { expected } => {
+ let msg = match expected {
+ ExpectedKind::Reference => fluent::const_eval_validation_expected_ref,
+ ExpectedKind::Box => fluent::const_eval_validation_expected_box,
+ ExpectedKind::RawPtr => fluent::const_eval_validation_expected_raw_ptr,
+ ExpectedKind::InitScalar => fluent::const_eval_validation_expected_init_scalar,
+ ExpectedKind::Bool => fluent::const_eval_validation_expected_bool,
+ ExpectedKind::Char => fluent::const_eval_validation_expected_char,
+ ExpectedKind::Float => fluent::const_eval_validation_expected_float,
+ ExpectedKind::Int => fluent::const_eval_validation_expected_int,
+ ExpectedKind::FnPtr => fluent::const_eval_validation_expected_fn_ptr,
+ ExpectedKind::EnumTag => fluent::const_eval_validation_expected_enum_tag,
+ ExpectedKind::Str => fluent::const_eval_validation_expected_str,
+ };
+ let msg = handler.eagerly_translate_to_string(msg, [].into_iter());
+ err.set_arg("expected", msg);
+ }
+ InvalidEnumTag { value }
| InvalidVTablePtr { value }
| InvalidBool { value }
| InvalidChar { value }
@@ -753,14 +780,12 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
| NullFnPtr
| NeverVal
| UnsafeCell
- | UninitEnumTag
- | UninitStr
- | Uninit { .. }
- | UninitVal
| InvalidMetaSliceTooLarge { .. }
| InvalidMetaTooLarge { .. }
| DanglingPtrUseAfterFree { .. }
- | DanglingPtrOutOfBounds { .. } => {}
+ | DanglingPtrOutOfBounds { .. }
+ | UninhabitedEnumVariant
+ | PartialPointer => {}
}
}
}
@@ -770,9 +795,9 @@ impl ReportErrorExt for UnsupportedOpInfo {
use crate::fluent_generated::*;
match self {
UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
- UnsupportedOpInfo::PartialPointerOverwrite(_) => const_eval_partial_pointer_overwrite,
- UnsupportedOpInfo::PartialPointerCopy(_) => const_eval_partial_pointer_copy,
- UnsupportedOpInfo::ReadPointerAsBytes => const_eval_read_pointer_as_bytes,
+ UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite,
+ UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy,
+ UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,
UnsupportedOpInfo::ThreadLocalStatic(_) => const_eval_thread_local_static,
UnsupportedOpInfo::ReadExternStatic(_) => const_eval_read_extern_static,
}
@@ -781,13 +806,16 @@ impl ReportErrorExt for UnsupportedOpInfo {
use crate::fluent_generated::*;
use UnsupportedOpInfo::*;
- if let ReadPointerAsBytes | PartialPointerOverwrite(_) | PartialPointerCopy(_) = self {
+ if let ReadPointerAsInt(_) | OverwritePartialPointer(_) | ReadPartialPointer(_) = self {
builder.help(const_eval_ptr_as_bytes_1);
builder.help(const_eval_ptr_as_bytes_2);
}
match self {
- Unsupported(_) | ReadPointerAsBytes => {}
- PartialPointerOverwrite(ptr) | PartialPointerCopy(ptr) => {
+ // `ReadPointerAsInt(Some(info))` is never printed anyway, it only serves as an error to
+ // be further processed by validity checking which then turns it into something nice to
+ // print. So it's not worth the effort of having diagnostics that can print the `info`.
+ Unsupported(_) | ReadPointerAsInt(_) => {}
+ OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => {
builder.set_arg("ptr", ptr);
}
ThreadLocalStatic(did) | ReadExternStatic(did) => {
@@ -834,8 +862,9 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
InvalidProgramInfo::FnAbiAdjustForForeignAbi(_) => {
rustc_middle::error::middle_adjust_for_foreign_abi_error
}
- InvalidProgramInfo::SizeOfUnsizedType(_) => const_eval_size_of_unsized,
- InvalidProgramInfo::UninitUnsizedLocal => const_eval_uninit_unsized_local,
+ InvalidProgramInfo::ConstPropNonsense => {
+ panic!("We had const-prop nonsense, this should never be printed")
+ }
}
}
fn add_args<G: EmissionGuarantee>(
@@ -846,7 +875,7 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
match self {
InvalidProgramInfo::TooGeneric
| InvalidProgramInfo::AlreadyReported(_)
- | InvalidProgramInfo::UninitUnsizedLocal => {}
+ | InvalidProgramInfo::ConstPropNonsense => {}
InvalidProgramInfo::Layout(e) => {
let diag: DiagnosticBuilder<'_, ()> = e.into_diagnostic().into_diagnostic(handler);
for (name, val) in diag.args() {
@@ -860,9 +889,6 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
builder.set_arg("arch", arch);
builder.set_arg("abi", abi.name());
}
- InvalidProgramInfo::SizeOfUnsizedType(ty) => {
- builder.set_arg("ty", ty);
- }
}
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 83a072d6f..98e853dc4 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -56,7 +56,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
- let src = self.read_immediate(&src)?;
+ let src = self.read_immediate(src)?;
let res = self.ptr_to_ptr(&src, cast_ty)?;
self.write_immediate(res, dest)?;
}
@@ -75,12 +75,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The src operand does not matter, just its type
match *src.layout.ty.kind() {
- ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, args) => {
let instance = ty::Instance::resolve_for_fn_ptr(
*self.tcx,
self.param_env,
def_id,
- substs,
+ args,
)
.ok_or_else(|| err_inval!(TooGeneric))?;
@@ -108,11 +108,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The src operand does not matter, just its type
match *src.layout.ty.kind() {
- ty::Closure(def_id, substs) => {
+ ty::Closure(def_id, args) => {
let instance = ty::Instance::resolve_closure(
*self.tcx,
def_id,
- substs,
+ args,
ty::ClosureKind::FnOnce,
)
.ok_or_else(|| err_inval!(TooGeneric))?;
@@ -420,8 +420,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if cast_ty_field.is_zst() {
continue;
}
- let src_field = self.operand_field(src, i)?;
- let dst_field = self.place_field(dest, i)?;
+ let src_field = self.project_field(src, i)?;
+ let dst_field = self.project_field(dest, i)?;
if src_field.layout.ty == cast_ty_field.ty {
self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
} else {
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 015a9beab..6c35fb01a 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -1,11 +1,11 @@
//! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
-use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
+use super::{ImmTy, InterpCx, InterpResult, Machine, Readable, Scalar, Writeable};
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Writes the discriminant of the given variant.
@@ -13,7 +13,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn write_discriminant(
&mut self,
variant_index: VariantIdx,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
// Layout computation excludes uninhabited variants from consideration
// therefore there's no way to represent those variants in the given layout.
@@ -21,11 +21,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// discriminant, so we cannot do anything here.
// When evaluating we will always error before even getting here, but ConstProp 'executes'
// dead code, so we cannot ICE here.
- if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
- throw_ub!(UninhabitedEnumVariantWritten)
+ if dest.layout().for_variant(self, variant_index).abi.is_uninhabited() {
+ throw_ub!(UninhabitedEnumVariantWritten(variant_index))
}
- match dest.layout.variants {
+ match dest.layout().variants {
abi::Variants::Single { index } => {
assert_eq!(index, variant_index);
}
@@ -38,8 +38,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            // No need to validate the discriminant here because the
// `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
- let discr_val =
- dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+ let discr_val = dest
+ .layout()
+ .ty
+ .discriminant_for_variant(*self.tcx, variant_index)
+ .unwrap()
+ .val;
// raw discriminants for enums are isize or bigger during
// their computation, but the in-memory tag is the smallest possible
@@ -47,7 +51,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let size = tag_layout.size(self);
let tag_val = size.truncate(discr_val);
- let tag_dest = self.place_field(dest, tag_field)?;
+ let tag_dest = self.project_field(dest, tag_field)?;
self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
}
abi::Variants::Multiple {
@@ -78,7 +82,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&niche_start_val,
)?;
// Write result.
- let niche_dest = self.place_field(dest, tag_field)?;
+ let niche_dest = self.project_field(dest, tag_field)?;
self.write_immediate(*tag_val, &niche_dest)?;
}
}
@@ -92,11 +96,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
#[instrument(skip(self), level = "trace")]
pub fn read_discriminant(
&self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
- trace!("read_discriminant_value {:#?}", op.layout);
+ op: &impl Readable<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, VariantIdx> {
+ let ty = op.layout().ty;
+ trace!("read_discriminant_value {:#?}", op.layout());
// Get type and layout of the discriminant.
- let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+ let discr_layout = self.layout_of(ty.discriminant_ty(*self.tcx))?;
trace!("discriminant type: {:?}", discr_layout.ty);
// We use "discriminant" to refer to the value associated with a particular enum variant.
@@ -104,21 +109,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// declared list of variants -- they can differ with explicitly assigned discriminants.
// We use "tag" to refer to how the discriminant is encoded in memory, which can be either
// straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
- let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+ let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout().variants {
Variants::Single { index } => {
- let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
- Some(discr) => {
- // This type actually has discriminants.
- assert_eq!(discr.ty, discr_layout.ty);
- Scalar::from_uint(discr.val, discr_layout.size)
+ // Do some extra checks on enums.
+ if ty.is_enum() {
+ // Hilariously, `Single` is used even for 0-variant enums.
+ // (See https://github.com/rust-lang/rust/issues/89765).
+ if matches!(ty.kind(), ty::Adt(def, ..) if def.variants().is_empty()) {
+ throw_ub!(UninhabitedEnumVariantRead(index))
}
- None => {
- // On a type without actual discriminants, variant is 0.
- assert_eq!(index.as_u32(), 0);
- Scalar::from_uint(index.as_u32(), discr_layout.size)
+                    // For consistency with `write_discriminant`, and to make sure that
+ // `project_downcast` cannot fail due to strange layouts, we declare immediate UB
+ // for uninhabited variants.
+ if op.layout().for_variant(self, index).abi.is_uninhabited() {
+ throw_ub!(UninhabitedEnumVariantRead(index))
}
- };
- return Ok((discr, index));
+ }
+ return Ok(index);
}
Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
(tag, tag_encoding, tag_field)
@@ -138,13 +145,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
// Read tag and sanity-check `tag_layout`.
- let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+ let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size);
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
trace!("tag value: {}", tag_val);
// Figure out which discriminant and variant this corresponds to.
- Ok(match *tag_encoding {
+ let index = match *tag_encoding {
TagEncoding::Direct => {
let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer.
@@ -160,21 +167,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
let discr_bits = discr_val.assert_bits(discr_layout.size);
// Convert discriminant to variant index, and catch invalid discriminants.
- let index = match *op.layout.ty.kind() {
+ let index = match *ty.kind() {
ty::Adt(adt, _) => {
adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
}
- ty::Generator(def_id, substs, _) => {
- let substs = substs.as_generator();
- substs
- .discriminants(def_id, *self.tcx)
- .find(|(_, var)| var.val == discr_bits)
+ ty::Generator(def_id, args, _) => {
+ let args = args.as_generator();
+ args.discriminants(def_id, *self.tcx).find(|(_, var)| var.val == discr_bits)
}
_ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
}
.ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
// Return the cast value, and the index.
- (discr_val, index.0)
+ index.0
}
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
let tag_val = tag_val.to_scalar();
@@ -216,12 +221,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.checked_add(variant_index_relative)
.expect("overflow computing absolute variant idx"),
);
- let variants = op
- .layout
- .ty
- .ty_adt_def()
- .expect("tagged layout for non adt")
- .variants();
+ let variants =
+ ty.ty_adt_def().expect("tagged layout for non adt").variants();
assert!(variant_index < variants.next_index());
variant_index
} else {
@@ -232,7 +233,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Compute the size of the scalar we need to return.
// No need to cast, because the variant index directly serves as discriminant and is
// encoded in the tag.
- (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+ variant
+ }
+ };
+        // For consistency with `write_discriminant`, and to make sure that `project_downcast` cannot fail due to strange layouts, we declare immediate UB for uninhabited variants.
+ if op.layout().for_variant(self, index).abi.is_uninhabited() {
+ throw_ub!(UninhabitedEnumVariantRead(index))
+ }
+ Ok(index)
+ }
+
+ pub fn discriminant_for_variant(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ let discr_layout = self.layout_of(layout.ty.discriminant_ty(*self.tcx))?;
+ Ok(match layout.ty.discriminant_for_variant(*self.tcx, variant) {
+ Some(discr) => {
+ // This type actually has discriminants.
+ assert_eq!(discr.ty, discr_layout.ty);
+ Scalar::from_uint(discr.val, discr_layout.size)
+ }
+ None => {
+ // On a type without actual discriminants, variant is 0.
+ assert_eq!(variant.as_u32(), 0);
+ Scalar::from_uint(variant.as_u32(), discr_layout.size)
}
})
}
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 36606ff69..3ac6f07e8 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::layout::{
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
TyAndLayout,
};
-use rustc_middle::ty::{self, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable};
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_session::Limit;
use rustc_span::Span;
@@ -91,7 +91,7 @@ pub struct Frame<'mir, 'tcx, Prov: Provenance = AllocId, Extra = ()> {
/// The MIR for the function called on this frame.
pub body: &'mir mir::Body<'tcx>,
- /// The def_id and substs of the current function.
+ /// The def_id and args of the current function.
pub instance: ty::Instance<'tcx>,
/// Extra data for the machine.
@@ -529,16 +529,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.map_err(|_| err_inval!(TooGeneric))
}
- /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
+ /// The `args` are assumed to already be in our interpreter "universe" (param_env).
pub(super) fn resolve(
&self,
def: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> InterpResult<'tcx, ty::Instance<'tcx>> {
- trace!("resolve: {:?}, {:#?}", def, substs);
+ trace!("resolve: {:?}, {:#?}", def, args);
trace!("param_env: {:#?}", self.param_env);
- trace!("substs: {:#?}", substs);
- match ty::Instance::resolve(*self.tcx, self.param_env, def, substs) {
+ trace!("args: {:#?}", args);
+ match ty::Instance::resolve(*self.tcx, self.param_env, def, args) {
Ok(Some(instance)) => Ok(instance),
Ok(None) => throw_inval!(TooGeneric),
@@ -604,7 +604,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// the last field). Can't have foreign types here, how would we
// adjust alignment and size for them?
let field = layout.field(self, layout.fields.count() - 1);
- let Some((unsized_size, mut unsized_align)) = self.size_and_align_of(metadata, &field)? else {
+ let Some((unsized_size, mut unsized_align)) =
+ self.size_and_align_of(metadata, &field)?
+ else {
// A field with an extern type. We don't know the actual dynamic size
// or the alignment.
return Ok(None);
@@ -682,11 +684,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
trace!("body: {:#?}", body);
- // Clobber previous return place contents, nobody is supposed to be able to see them any more
- // This also checks dereferenceable, but not align. We rely on all constructed places being
- // sufficiently aligned (in particular we rely on `deref_operand` checking alignment).
- self.write_uninit(return_place)?;
- // first push a stack frame so we have access to the local substs
+ // First push a stack frame so we have access to the local args
let pre_frame = Frame {
body,
loc: Right(body.span), // Span used for errors caused during preamble.
@@ -805,6 +803,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_ub_custom!(fluent::const_eval_unwind_past_top);
}
+ M::before_stack_pop(self, self.frame())?;
+
// Copy return value. Must of course happen *before* we deallocate the locals.
let copy_ret_result = if !unwinding {
let op = self
@@ -958,7 +958,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} else {
self.param_env
};
- let param_env = param_env.with_const();
let val = self.ctfe_query(span, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
self.raw_const_to_mplace(val)
}
@@ -1014,9 +1013,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
{
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.place {
- Place::Local { frame, local } => {
+ Place::Local { frame, local, offset } => {
let mut allocs = Vec::new();
- write!(fmt, "{:?}", local)?;
+ write!(fmt, "{local:?}")?;
+ if let Some(offset) = offset {
+ write!(fmt, "+{:#x}", offset.bytes())?;
+ }
if frame != self.ecx.frame_idx() {
write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?;
}
@@ -1032,7 +1034,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
fmt,
" by {} ref {:?}:",
match mplace.meta {
- MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
+ MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"),
MemPlaceMeta::None => String::new(),
},
mplace.ptr,
@@ -1040,13 +1042,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
}
LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
- write!(fmt, " {:?}", val)?;
+ write!(fmt, " {val:?}")?;
if let Scalar::Ptr(ptr, _size) = val {
allocs.push(ptr.provenance.get_alloc_id());
}
}
LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
- write!(fmt, " ({:?}, {:?})", val1, val2)?;
+ write!(fmt, " ({val1:?}, {val2:?})")?;
if let Scalar::Ptr(ptr, _size) = val1 {
allocs.push(ptr.provenance.get_alloc_id());
}
@@ -1062,7 +1064,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
Some(alloc_id) => {
write!(fmt, "by ref {:?}: {:?}", mplace.ptr, self.ecx.dump_alloc(alloc_id))
}
- ptr => write!(fmt, " integral by ref: {:?}", ptr),
+ ptr => write!(fmt, " integral by ref: {ptr:?}"),
},
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 7b11ad330..910c3ca5d 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -30,7 +30,7 @@ use super::{
use crate::const_eval;
use crate::errors::{DanglingPtrInFinal, UnsupportedUntypedPointer};
-pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
+pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
'mir,
'tcx,
MemoryKind = T,
@@ -164,82 +164,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
&self.ecx
}
- fn visit_aggregate(
- &mut self,
- mplace: &MPlaceTy<'tcx>,
- fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
- ) -> InterpResult<'tcx> {
- // We want to walk the aggregate to look for references to intern. While doing that we
- // also need to take special care of interior mutability.
- //
- // As an optimization, however, if the allocation does not contain any references: we don't
- // need to do the walk. It can be costly for big arrays for example (e.g. issue #93215).
- let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
- // ZSTs cannot contain pointers, we can avoid the interning walk.
- if mplace.layout.is_zst() {
- return Ok(false);
- }
-
- // Now, check whether this allocation could contain references.
- //
- // Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
- // to avoid could be expensive: on the potentially larger types, arrays and slices,
- // rather than on all aggregates unconditionally.
- if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
- let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
- // We do the walk if we can't determine the size of the mplace: we may be
- // dealing with extern types here in the future.
- return Ok(true);
- };
-
- // If there is no provenance in this allocation, it does not contain references
- // that point to another allocation, and we can avoid the interning walk.
- if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
- if !alloc.has_provenance() {
- return Ok(false);
- }
- } else {
- // We're encountering a ZST here, and can avoid the walk as well.
- return Ok(false);
- }
- }
-
- // In the general case, we do the walk.
- Ok(true)
- };
-
- // If this allocation contains no references to intern, we avoid the potentially costly
- // walk.
- //
- // We can do this before the checks for interior mutability below, because only references
- // are relevant in that situation, and we're checking if there are any here.
- if !is_walk_needed(mplace)? {
- return Ok(());
- }
-
- if let Some(def) = mplace.layout.ty.ty_adt_def() {
- if def.is_unsafe_cell() {
- // We are crossing over an `UnsafeCell`, we can mutate again. This means that
- // References we encounter inside here are interned as pointing to mutable
- // allocations.
- // Remember the `old` value to handle nested `UnsafeCell`.
- let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
- let walked = self.walk_aggregate(mplace, fields);
- self.inside_unsafe_cell = old;
- return walked;
- }
- }
-
- self.walk_aggregate(mplace, fields)
- }
-
fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
// Handle Reference types, as these are the only types with provenance supported by const eval.
// Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
let tcx = self.ecx.tcx;
let ty = mplace.layout.ty;
if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
- let value = self.ecx.read_immediate(&mplace.into())?;
+ let value = self.ecx.read_immediate(mplace)?;
let mplace = self.ecx.ref_to_mplace(&value)?;
assert_eq!(mplace.layout.ty, referenced_ty);
// Handle trait object vtables.
@@ -315,7 +246,63 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
}
Ok(())
} else {
- // Not a reference -- proceed recursively.
+ // Not a reference. Check if we want to recurse.
+ let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
+ // ZSTs cannot contain pointers, we can avoid the interning walk.
+ if mplace.layout.is_zst() {
+ return Ok(false);
+ }
+
+ // Now, check whether this allocation could contain references.
+ //
+ // Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
+ // to avoid could be expensive: on the potentially larger types, arrays and slices,
+ // rather than on all aggregates unconditionally.
+ if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
+ let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
+ // We do the walk if we can't determine the size of the mplace: we may be
+ // dealing with extern types here in the future.
+ return Ok(true);
+ };
+
+ // If there is no provenance in this allocation, it does not contain references
+ // that point to another allocation, and we can avoid the interning walk.
+ if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
+ if !alloc.has_provenance() {
+ return Ok(false);
+ }
+ } else {
+ // We're encountering a ZST here, and can avoid the walk as well.
+ return Ok(false);
+ }
+ }
+
+ // In the general case, we do the walk.
+ Ok(true)
+ };
+
+ // If this allocation contains no references to intern, we avoid the potentially costly
+ // walk.
+ //
+ // We can do this before the checks for interior mutability below, because only references
+ // are relevant in that situation, and we're checking if there are any here.
+ if !is_walk_needed(mplace)? {
+ return Ok(());
+ }
+
+ if let Some(def) = mplace.layout.ty.ty_adt_def() {
+ if def.is_unsafe_cell() {
+ // We are crossing over an `UnsafeCell`, we can mutate again. This means that
+ // References we encounter inside here are interned as pointing to mutable
+ // allocations.
+ // Remember the `old` value to handle nested `UnsafeCell`.
+ let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
+ let walked = self.walk_value(mplace);
+ self.inside_unsafe_cell = old;
+ return walked;
+ }
+ }
+
self.walk_value(mplace)
}
}
@@ -371,7 +358,7 @@ pub fn intern_const_alloc_recursive<
Some(ret.layout.ty),
);
- ref_tracking.track((*ret, base_intern_mode), || ());
+ ref_tracking.track((ret.clone(), base_intern_mode), || ());
while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
let res = InternVisitor {
@@ -477,7 +464,7 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
) -> InterpResult<'tcx, ()>,
) -> InterpResult<'tcx, ConstAllocation<'tcx>> {
let dest = self.allocate(layout, MemoryKind::Stack)?;
- f(self, &dest.into())?;
+ f(self, &dest.clone().into())?;
let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
alloc.mutability = Mutability::Not;
Ok(self.tcx.mk_const_alloc(alloc))
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index ed64a7655..f22cd919c 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -1,4 +1,4 @@
-//! Intrinsics and other functions that the miri engine executes without
+//! Intrinsics and other functions that the interpreter executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.
@@ -12,7 +12,7 @@ use rustc_middle::mir::{
};
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, Align, Primitive, Size};
@@ -56,9 +56,9 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
- let tp_ty = substs.type_at(0);
+ let tp_ty = args.type_at(0);
let name = tcx.item_name(def_id);
Ok(match name {
sym::type_name => {
@@ -123,7 +123,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dest: &PlaceTy<'tcx, M::Provenance>,
ret: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, bool> {
- let substs = instance.substs;
+ let instance_args = instance.args;
let intrinsic_name = self.tcx.item_name(instance.def_id());
// First handle intrinsics without return place.
@@ -144,7 +144,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
sym::min_align_of_val | sym::size_of_val => {
- // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
+ // Avoid `deref_pointer` -- this is not a deref, the ptr does not have to be
// dereferenceable!
let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
let (size, align) = self
@@ -187,7 +187,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
| sym::ctlz_nonzero
| sym::bswap
| sym::bitreverse => {
- let ty = substs.type_at(0);
+ let ty = instance_args.type_at(0);
let layout_of = self.layout_of(ty)?;
let val = self.read_scalar(&args[0])?;
let bits = val.to_bits(layout_of.size)?;
@@ -225,9 +225,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.write_scalar(val, dest)?;
}
sym::discriminant_value => {
- let place = self.deref_operand(&args[0])?;
- let discr_val = self.read_discriminant(&place.into())?.0;
- self.write_scalar(discr_val, dest)?;
+ let place = self.deref_pointer(&args[0])?;
+ let variant = self.read_discriminant(&place)?;
+ let discr = self.discriminant_for_variant(place.layout, variant)?;
+ self.write_scalar(discr, dest)?;
}
sym::exact_div => {
let l = self.read_immediate(&args[0])?;
@@ -237,7 +238,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::rotate_left | sym::rotate_right => {
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
- let layout = self.layout_of(substs.type_at(0))?;
+ let layout = self.layout_of(instance_args.type_at(0))?;
let val = self.read_scalar(&args[0])?;
let val_bits = val.to_bits(layout.size)?;
let raw_shift = self.read_scalar(&args[1])?;
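// Aside: a minimal, standalone check of the rotate formulas quoted in the
// comments above, using the stable `u8::rotate_left` method (illustration
// only; not part of this commit).
fn rotate_formula_demo() {
    let x: u8 = 0b1000_0001;
    let (s, bw) = (1u32, 8u32);
    // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
    let manual = (x << (s % bw)) | (x >> ((bw - s) % bw));
    assert_eq!(manual, x.rotate_left(s));
    assert_eq!(x.rotate_left(1), 0b0000_0011);
}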
@@ -260,10 +261,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::write_bytes => {
self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
}
+ sym::compare_bytes => {
+ let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
+ self.write_scalar(result, dest)?;
+ }
sym::arith_offset => {
let ptr = self.read_pointer(&args[0])?;
let offset_count = self.read_target_isize(&args[1])?;
- let pointee_ty = substs.type_at(0);
+ let pointee_ty = instance_args.type_at(0);
let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
let offset_bytes = offset_count.wrapping_mul(pointee_size);
@@ -368,7 +373,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
isize_layout
};
- let pointee_layout = self.layout_of(substs.type_at(0))?;
+ let pointee_layout = self.layout_of(instance_args.type_at(0))?;
// If ret_layout is unsigned, we checked that so is the distance, so we are good.
let val = ImmTy::from_int(dist, ret_layout);
let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
@@ -378,7 +383,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::assert_inhabited
| sym::assert_zero_valid
| sym::assert_mem_uninitialized_valid => {
- let ty = instance.substs.type_at(0);
+ let ty = instance.args.type_at(0);
let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();
let should_panic = !self
@@ -393,17 +398,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// For *all* intrinsics we first check `is_uninhabited` to give a more specific
// error message.
_ if layout.abi.is_uninhabited() => format!(
- "aborted execution: attempted to instantiate uninhabited type `{}`",
- ty
+ "aborted execution: attempted to instantiate uninhabited type `{ty}`"
),
ValidityRequirement::Inhabited => bug!("handled earlier"),
ValidityRequirement::Zero => format!(
- "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
- ty
+ "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
),
ValidityRequirement::UninitMitigated0x01Fill => format!(
- "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
- ty
+ "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
),
ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
};
@@ -419,19 +421,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(input_len, dest_len, "Return vector length must match input length");
assert!(
index < dest_len,
- "Index `{}` must be in bounds of vector with length {}",
- index,
- dest_len
+ "Index `{index}` must be in bounds of vector with length {dest_len}"
);
for i in 0..dest_len {
- let place = self.mplace_index(&dest, i)?;
+ let place = self.project_index(&dest, i)?;
let value = if i == index {
elem.clone()
} else {
- self.mplace_index(&input, i)?.into()
+ self.project_index(&input, i)?.into()
};
- self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
+ self.copy_op(&value, &place, /*allow_transmute*/ false)?;
}
}
sym::simd_extract => {
@@ -439,12 +439,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (input, input_len) = self.operand_to_simd(&args[0])?;
assert!(
index < input_len,
- "index `{}` must be in bounds of vector with length {}",
- index,
- input_len
+ "index `{index}` must be in bounds of vector with length {input_len}"
);
self.copy_op(
- &self.mplace_index(&input, index)?.into(),
+ &self.project_index(&input, index)?,
dest,
/*allow_transmute*/ false,
)?;
@@ -609,7 +607,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
- let count = self.read_target_usize(&count)?;
+ let count = self.read_target_usize(count)?;
let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
let (size, align) = (layout.size, layout.align.abi);
// `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
@@ -621,8 +619,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
})?;
- let src = self.read_pointer(&src)?;
- let dst = self.read_pointer(&dst)?;
+ let src = self.read_pointer(src)?;
+ let dst = self.read_pointer(dst)?;
self.mem_copy(src, align, dst, align, size, nonoverlapping)
}
@@ -635,9 +633,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx> {
let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;
- let dst = self.read_pointer(&dst)?;
- let byte = self.read_scalar(&byte)?.to_u8()?;
- let count = self.read_target_usize(&count)?;
+ let dst = self.read_pointer(dst)?;
+ let byte = self.read_scalar(byte)?.to_u8()?;
+ let count = self.read_target_usize(count)?;
// `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
// but no actual allocation can be big enough for the difference to be noticeable.
@@ -649,6 +647,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.write_bytes_ptr(dst, bytes)
}
+ pub(crate) fn compare_bytes_intrinsic(
+ &mut self,
+ left: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ right: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ byte_count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ let left = self.read_pointer(left)?;
+ let right = self.read_pointer(right)?;
+ let n = Size::from_bytes(self.read_target_usize(byte_count)?);
+
+ let left_bytes = self.read_bytes_ptr_strip_provenance(left, n)?;
+ let right_bytes = self.read_bytes_ptr_strip_provenance(right, n)?;
+
+ // `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
+ let result = Ord::cmp(left_bytes, right_bytes) as i32;
+ Ok(Scalar::from_i32(result))
+ }
+
pub(crate) fn raw_eq_intrinsic(
&mut self,
lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
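// Aside: the new `compare_bytes` intrinsic above leans on `Ordering`'s
// discriminants being -1/0/+1, so an `as i32` cast yields the usual
// memcmp-style result (illustration only; not part of this commit).
fn compare_bytes_demo() {
    use std::cmp::Ordering;
    assert_eq!(Ordering::Less as i32, -1);
    assert_eq!(Ordering::Equal as i32, 0);
    assert_eq!(Ordering::Greater as i32, 1);
    // Lexicographic byte comparison, mirroring `Ord::cmp(left_bytes, right_bytes) as i32`.
    assert_eq!(Ord::cmp(b"abc".as_slice(), b"abd".as_slice()) as i32, -1);
}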
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index df5b58100..948bec746 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -96,16 +96,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let loc_ty = self
.tcx
.type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
- .subst(*self.tcx, self.tcx.mk_substs(&[self.tcx.lifetimes.re_erased.into()]));
+ .instantiate(*self.tcx, self.tcx.mk_args(&[self.tcx.lifetimes.re_erased.into()]));
let loc_layout = self.layout_of(loc_ty).unwrap();
let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
// Initialize fields.
- self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
+ self.write_immediate(file.to_ref(self), &self.project_field(&location, 0).unwrap())
.expect("writing to memory we just allocated cannot fail");
- self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
+ self.write_scalar(line, &self.project_field(&location, 1).unwrap())
.expect("writing to memory we just allocated cannot fail");
- self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into())
+ self.write_scalar(col, &self.project_field(&location, 2).unwrap())
.expect("writing to memory we just allocated cannot fail");
location
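// Aside: the three fields initialized above (file, line, column) mirror what
// `core::panic::Location` exposes to ordinary Rust code; a small sketch of
// the runtime counterpart (illustration only; not part of this commit).
#[track_caller]
fn caller_location_demo() -> String {
    let loc = std::panic::Location::caller();
    format!("{}:{}:{}", loc.file(), loc.line(), loc.column())
}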
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index b448e3a24..e101785b6 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -17,7 +17,7 @@ use rustc_target::spec::abi::Abi as CallAbi;
use crate::const_eval::CheckAlignment;
use super::{
- AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx,
+ AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
InterpResult, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar,
};
@@ -84,7 +84,7 @@ pub trait AllocMap<K: Hash + Eq, V> {
/// Methods of this trait signifies a point where CTFE evaluation would fail
/// and some use case dependent behaviour can instead be applied.
-pub trait Machine<'mir, 'tcx>: Sized {
+pub trait Machine<'mir, 'tcx: 'mir>: Sized {
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
@@ -182,7 +182,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
abi: CallAbi,
- args: &[OpTy<'tcx, Self::Provenance>],
+ args: &[FnArg<'tcx, Self::Provenance>],
destination: &PlaceTy<'tcx, Self::Provenance>,
target: Option<mir::BasicBlock>,
unwind: mir::UnwindAction,
@@ -194,7 +194,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
ecx: &mut InterpCx<'mir, 'tcx, Self>,
fn_val: Self::ExtraFnVal,
abi: CallAbi,
- args: &[OpTy<'tcx, Self::Provenance>],
+ args: &[FnArg<'tcx, Self::Provenance>],
destination: &PlaceTy<'tcx, Self::Provenance>,
target: Option<mir::BasicBlock>,
unwind: mir::UnwindAction,
@@ -418,6 +418,18 @@ pub trait Machine<'mir, 'tcx>: Sized {
Ok(())
}
+ /// Called on places used for in-place function argument and return value handling.
+ ///
+ /// These places need to be protected to make sure the program cannot tell whether the
+    /// argument/return value was actually copied or passed in-place.
+ fn protect_in_place_function_argument(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ place: &PlaceTy<'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx> {
+ // Without an aliasing model, all we can do is put `Uninit` into the place.
+ ecx.write_uninit(place)
+ }
+
/// Called immediately before a new stack frame gets pushed.
fn init_frame_extra(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
@@ -439,6 +451,14 @@ pub trait Machine<'mir, 'tcx>: Sized {
Ok(())
}
+ /// Called just before the return value is copied to the caller-provided return place.
+ fn before_stack_pop(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _frame: &Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
/// Called immediately after a stack frame got popped, but before jumping back to the caller.
/// The `locals` have already been destroyed!
fn after_stack_pop(
@@ -484,7 +504,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
_ecx: &mut InterpCx<$mir, $tcx, Self>,
fn_val: !,
_abi: CallAbi,
- _args: &[OpTy<$tcx>],
+ _args: &[FnArg<$tcx>],
_destination: &PlaceTy<$tcx, Self::Provenance>,
_target: Option<mir::BasicBlock>,
_unwind: mir::UnwindAction,
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 1125d8d1f..11bffedf5 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -53,7 +53,7 @@ impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
match self {
MemoryKind::Stack => write!(f, "stack variable"),
MemoryKind::CallerLocation => write!(f, "caller location"),
- MemoryKind::Machine(m) => write!(f, "{}", m),
+ MemoryKind::Machine(m) => write!(f, "{m}"),
}
}
}
@@ -91,7 +91,7 @@ impl<'tcx, Other> FnVal<'tcx, Other> {
// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
- /// Allocations local to this instance of the miri engine. The kind
+ /// Allocations local to this instance of the interpreter. The kind
/// helps ensure that the same mechanism is used for allocation and
/// deallocation. When an allocation is not found here, it is a
/// global and looked up in the `tcx` for read access. Some machines may
@@ -317,7 +317,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
kind = "static_mem"
)
}
- None => err_ub!(PointerUseAfterFree(alloc_id)),
+ None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccessTest)),
}
.into());
};
@@ -380,7 +380,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
M::enforce_alignment(self),
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, prov| {
- let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+ let (size, align) = self
+ .get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccessTest)?;
Ok((size, align, (alloc_id, offset, prov)))
},
)
@@ -404,7 +405,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
CheckAlignment::Error,
msg,
|alloc_id, _, _| {
- let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+ let (size, align) = self.get_live_alloc_size_and_align(alloc_id, msg)?;
Ok((size, align, ()))
},
)?;
@@ -414,7 +415,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
/// to the allocation it points to. Supports both shared and mutable references, as the actual
/// checking is offloaded to a helper closure. `align` defines whether and which alignment check
- /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
+ /// is done.
+ ///
+ /// If this returns `None`, the size is 0; it can however return `Some` even for size 0.
fn check_and_deref_ptr<T>(
&self,
ptr: Pointer<Option<M::Provenance>>,
@@ -515,7 +518,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
- None => throw_ub!(PointerUseAfterFree(id)),
+ None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccessTest)),
Some(GlobalAlloc::Static(def_id)) => {
assert!(self.tcx.is_static(def_id));
assert!(!self.tcx.is_thread_local_static(def_id));
@@ -761,11 +764,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- /// Obtain the size and alignment of a live allocation.
- pub fn get_live_alloc_size_and_align(&self, id: AllocId) -> InterpResult<'tcx, (Size, Align)> {
+ /// Obtain the size and alignment of a *live* allocation.
+ fn get_live_alloc_size_and_align(
+ &self,
+ id: AllocId,
+ msg: CheckInAllocMsg,
+ ) -> InterpResult<'tcx, (Size, Align)> {
let (size, align, kind) = self.get_alloc_info(id);
if matches!(kind, AllocKind::Dead) {
- throw_ub!(PointerUseAfterFree(id))
+ throw_ub!(PointerUseAfterFree(id, msg))
}
Ok((size, align))
}
@@ -907,7 +914,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
match self.ecx.memory.alloc_map.get(id) {
Some((kind, alloc)) => {
// normal alloc
- write!(fmt, " ({}, ", kind)?;
+ write!(fmt, " ({kind}, ")?;
write_allocation_track_relocs(
&mut *fmt,
*self.ecx.tcx,
@@ -1060,11 +1067,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let size = Size::from_bytes(len);
let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
// zero-sized access
- assert_matches!(
- src.next(),
- None,
- "iterator said it was empty but returned an element"
- );
+ assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
return Ok(());
};
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 898d62361..b0b553c45 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -24,10 +24,12 @@ pub use self::eval_context::{Frame, FrameInfo, InterpCx, LocalState, LocalValue,
pub use self::intern::{intern_const_alloc_recursive, InternKind};
pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
-pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
-pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
+pub use self::operand::{ImmTy, Immediate, OpTy, Operand, Readable};
+pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy, Writeable};
+pub use self::projection::Projectable;
+pub use self::terminator::FnArg;
pub use self::validity::{CtfeValidationMode, RefTracking};
-pub use self::visitor::{MutValueVisitor, Value, ValueVisitor};
+pub use self::visitor::ValueVisitor;
pub(crate) use self::intrinsics::eval_nullary_intrinsic;
use eval_context::{from_known_layout, mir_assign_valid_types};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 5f89d652f..6e57a56b4 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -1,6 +1,8 @@
//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.
+use std::assert_matches::assert_matches;
+
use either::{Either, Left, Right};
use rustc_hir::def::Namespace;
@@ -13,8 +15,8 @@ use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
- InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer,
- Provenance, Scalar,
+ InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer,
+ Projectable, Provenance, Scalar,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@@ -31,7 +33,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
/// A pair of two scalar value (must have `ScalarPair` ABI where both fields are
/// `Scalar::Initialized`).
ScalarPair(Scalar<Prov>, Scalar<Prov>),
- /// A value of fully uninitialized memory. Can have and size and layout.
+ /// A value of fully uninitialized memory. Can have arbitrary size and layout.
Uninit,
}
@@ -178,20 +180,6 @@ impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
}
}
-impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
- }
-}
-
-impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
- }
-}
-
impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(val: ImmTy<'tcx, Prov>) -> Self {
@@ -240,43 +228,126 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
let int = self.to_scalar().assert_int();
ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
}
+
+ /// Compute the "sub-immediate" that is located within the `base` at the given offset with the
+ /// given layout.
+ // Not called `offset` to avoid confusion with the trait method.
+ fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
+ // This makes several assumptions about what layouts we will encounter; we match what
+        // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
+ let inner_val: Immediate<_> = match (**self, self.layout.abi) {
+ // if the entire value is uninit, then so is the field (can happen in ConstProp)
+ (Immediate::Uninit, _) => Immediate::Uninit,
+ // the field contains no information, can be left uninit
+ _ if layout.is_zst() => Immediate::Uninit,
+ // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
+ // to detect those here and also give them no data
+ _ if matches!(layout.abi, Abi::Aggregate { .. })
+ && matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
+ {
+ Immediate::Uninit
+ }
+ // the field covers the entire type
+ _ if layout.size == self.layout.size => {
+ assert_eq!(offset.bytes(), 0);
+ assert!(
+ match (self.layout.abi, layout.abi) {
+ (Abi::Scalar(..), Abi::Scalar(..)) => true,
+ (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+ _ => false,
+ },
+ "cannot project into {} immediate with equally-sized field {}\nouter ABI: {:#?}\nfield ABI: {:#?}",
+ self.layout.ty,
+ layout.ty,
+ self.layout.abi,
+ layout.abi,
+ );
+ **self
+ }
+ // extract fields from types with `ScalarPair` ABI
+ (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+ assert!(matches!(layout.abi, Abi::Scalar(..)));
+ Immediate::from(if offset.bytes() == 0 {
+ debug_assert_eq!(layout.size, a.size(cx));
+ a_val
+ } else {
+ debug_assert_eq!(offset, a.size(cx).align_to(b.align(cx).abi));
+ debug_assert_eq!(layout.size, b.size(cx));
+ b_val
+ })
+ }
+ // everything else is a bug
+ _ => bug!("invalid field access on immediate {}, layout {:#?}", self, self.layout),
+ };
+
+ ImmTy::from_immediate(inner_val, layout)
+ }
+}
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never get here
+ Ok(MemPlaceMeta::None)
+ }
+
+ fn offset_with_meta(
+ &self,
+ offset: Size,
+ meta: MemPlaceMeta<Prov>,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
+ Ok(self.offset_(offset, layout, cx))
+ }
+
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone().into())
+ }
}
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
- pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
- if self.layout.is_unsized() {
- if matches!(self.op, Operand::Immediate(Immediate::Uninit)) {
- // Uninit unsized places shouldn't occur. In the interpreter we have them
- // temporarily for unsized arguments before their value is put in; in ConstProp they
- // remain uninit and this code can actually be reached.
- throw_inval!(UninitUnsizedLocal);
+ // Provided as inherent method since it doesn't need the `ecx` of `Projectable::meta`.
+ pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
+ Ok(if self.layout.is_unsized() {
+ if matches!(self.op, Operand::Immediate(_)) {
+ // Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing.
+ // However, ConstProp doesn't do that, so we can run into this nonsense situation.
+ throw_inval!(ConstPropNonsense);
}
// There are no unsized immediates.
- self.assert_mem_place().len(cx)
+ self.assert_mem_place().meta
} else {
- match self.layout.fields {
- abi::FieldsShape::Array { count, .. } => Ok(count),
- _ => bug!("len not supported on sized type {:?}", self.layout.ty),
- }
- }
+ MemPlaceMeta::None
+ })
}
+}
- /// Replace the layout of this operand. There's basically no sanity check that this makes sense,
- /// you better know what you are doing! If this is an immediate, applying the wrong layout can
- /// not just lead to invalid data, it can actually *shift the data around* since the offsets of
- /// a ScalarPair are entirely determined by the layout, not the data.
- pub fn transmute(&self, layout: TyAndLayout<'tcx>) -> Self {
- assert_eq!(
- self.layout.size, layout.size,
- "transmuting with a size change, that doesn't seem right"
- );
- OpTy { layout, ..*self }
+impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
}
- /// Offset the operand in memory (if possible) and change its metadata.
- ///
- /// This can go wrong very easily if you give the wrong layout for the new place!
- pub(super) fn offset_with_meta(
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+ self.meta()
+ }
+
+ fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
@@ -286,28 +357,43 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
match self.as_mplace_or_imm() {
Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
Right(imm) => {
- assert!(
- matches!(*imm, Immediate::Uninit),
- "Scalar/ScalarPair cannot be offset into"
- );
assert!(!meta.has_meta()); // no place to store metadata here
// Every part of an uninit is uninit.
- Ok(ImmTy::uninit(layout).into())
+ Ok(imm.offset(offset, layout, cx)?.into())
}
}
}
- /// Offset the operand in memory (if possible).
- ///
- /// This can go wrong very easily if you give the wrong layout for the new place!
- pub fn offset(
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
- offset: Size,
- layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
- ) -> InterpResult<'tcx, Self> {
- assert!(layout.is_sized());
- self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone())
+ }
+}
+
+pub trait Readable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
+ fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>>;
+}
+
+impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+ self.as_mplace_or_imm()
+ }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+ Left(self.clone())
+ }
+}
+
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for ImmTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+ Right(self.clone())
}
}
@@ -383,14 +469,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// ConstProp needs it, though.
pub fn read_immediate_raw(
&self,
- src: &OpTy<'tcx, M::Provenance>,
+ src: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
Ok(match src.as_mplace_or_imm() {
Left(ref mplace) => {
if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
Right(val)
} else {
- Left(*mplace)
+ Left(mplace.clone())
}
}
Right(val) => Right(val),
@@ -403,14 +489,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
#[inline(always)]
pub fn read_immediate(
&self,
- op: &OpTy<'tcx, M::Provenance>,
+ op: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
if !matches!(
- op.layout.abi,
+ op.layout().abi,
Abi::Scalar(abi::Scalar::Initialized { .. })
| Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
) {
- span_bug!(self.cur_span(), "primitive read not possible for type: {:?}", op.layout.ty);
+ span_bug!(
+ self.cur_span(),
+ "primitive read not possible for type: {:?}",
+ op.layout().ty
+ );
}
let imm = self.read_immediate_raw(op)?.right().unwrap();
if matches!(*imm, Immediate::Uninit) {
@@ -422,7 +512,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Read a scalar from a place
pub fn read_scalar(
&self,
- op: &OpTy<'tcx, M::Provenance>,
+ op: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
Ok(self.read_immediate(op)?.to_scalar())
}
@@ -433,16 +523,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Read a pointer from a place.
pub fn read_pointer(
&self,
- op: &OpTy<'tcx, M::Provenance>,
+ op: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
self.read_scalar(op)?.to_pointer(self)
}
/// Read a pointer-sized unsigned integer from a place.
- pub fn read_target_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
+ pub fn read_target_usize(
+ &self,
+ op: &impl Readable<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, u64> {
self.read_scalar(op)?.to_target_usize(self)
}
/// Read a pointer-sized signed integer from a place.
- pub fn read_target_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
+ pub fn read_target_isize(
+ &self,
+ op: &impl Readable<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, i64> {
self.read_scalar(op)?.to_target_isize(self)
}
@@ -497,18 +593,28 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Every place can be read from, so we can turn them into an operand.
/// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
/// will never actually read from memory.
- #[inline(always)]
pub fn place_to_op(
&self,
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- let op = match **place {
- Place::Ptr(mplace) => Operand::Indirect(mplace),
- Place::Local { frame, local } => {
- *self.local_to_op(&self.stack()[frame], local, None)?
+ match place.as_mplace_or_local() {
+ Left(mplace) => Ok(mplace.into()),
+ Right((frame, local, offset)) => {
+ let base = self.local_to_op(&self.stack()[frame], local, None)?;
+ let mut field = if let Some(offset) = offset {
+ // This got offset. We can be sure that the field is sized.
+ base.offset(offset, place.layout, self)?
+ } else {
+ assert_eq!(place.layout, base.layout);
+ // Unsized cases are possible here since an unsized local will be a
+ // `Place::Local` until the first projection calls `place_to_op` to extract the
+ // underlying mplace.
+ base
+ };
+ field.align = Some(place.align);
+ Ok(field)
}
- };
- Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
+ }
}
/// Evaluate a place with the goal of reading from it. This lets us sometimes
@@ -525,7 +631,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?;
// Using `try_fold` turned out to be bad for performance, hence the loop.
for elem in mir_place.projection.iter() {
- op = self.operand_projection(&op, elem)?
+ op = self.project(&op, elem)?
}
trace!("eval_place_to_op: got {:?}", *op);
@@ -575,14 +681,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(op)
}
- /// Evaluate a bunch of operands at once
- pub(super) fn eval_operands(
- &self,
- ops: &[mir::Operand<'tcx>],
- ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::Provenance>>> {
- ops.iter().map(|op| self.eval_operand(op, None)).collect()
- }
-
fn eval_ty_constant(
&self,
val: ty::Const<'tcx>,
@@ -598,12 +696,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_inval!(AlreadyReported(reported.into()))
}
ty::ConstKind::Unevaluated(uv) => {
- let instance = self.resolve(uv.def, uv.substs)?;
+ let instance = self.resolve(uv.def, uv.args)?;
let cid = GlobalId { instance, promoted: None };
- self.ctfe_query(span, |tcx| {
- tcx.eval_to_valtree(self.param_env.with_const().and(cid))
- })?
- .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
+ self.ctfe_query(span, |tcx| tcx.eval_to_valtree(self.param_env.and(cid)))?
+ .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
}
ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
@@ -627,7 +723,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout),
mir::ConstantKind::Unevaluated(uv, _) => {
- let instance = self.resolve(uv.def, uv.substs)?;
+ let instance = self.resolve(uv.def, uv.args)?;
Ok(self.eval_global(GlobalId { instance, promoted: uv.promoted }, span)?.into())
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index e04764636..eb0645780 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -24,8 +24,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
debug_assert_eq!(
Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
dest.layout.ty,
- "type mismatch for result of {:?}",
- op,
+ "type mismatch for result of {op:?}",
);
// Write the result to `dest`.
if let Abi::ScalarPair(..) = dest.layout.abi {
@@ -38,9 +37,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
// do a component-wise write here. This code path is slower than the above because
// `place_field` will have to `force_allocate` locals here.
- let val_field = self.place_field(&dest, 0)?;
+ let val_field = self.project_field(dest, 0)?;
self.write_scalar(val, &val_field)?;
- let overflowed_field = self.place_field(&dest, 1)?;
+ let overflowed_field = self.project_field(dest, 1)?;
self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
}
Ok(())
@@ -56,7 +55,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
- assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
+ assert_eq!(ty, dest.layout.ty, "type mismatch for result of {op:?}");
self.write_scalar(val, dest)
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index ca1106384..daadb7589 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -2,11 +2,14 @@
//! into a place.
//! All high-level functions to write to memory work on places as destinations.
+use std::assert_matches::assert_matches;
+
use either::{Either, Left, Right};
use rustc_ast::Mutability;
use rustc_index::IndexSlice;
use rustc_middle::mir;
+use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
@@ -15,7 +18,7 @@ use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_V
use super::{
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
- Pointer, Provenance, Scalar,
+ Pointer, Projectable, Provenance, Readable, Scalar,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -44,6 +47,27 @@ impl<Prov: Provenance> MemPlaceMeta<Prov> {
Self::None => false,
}
}
+
+ pub(crate) fn len<'tcx>(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, u64> {
+ if layout.is_unsized() {
+ // We need to consult `meta` metadata
+ match layout.ty.kind() {
+ ty::Slice(..) | ty::Str => self.unwrap_meta().to_target_usize(cx),
+ _ => bug!("len not supported on unsized type {:?}", layout.ty),
+ }
+ } else {
+ // Go through the layout. There are lots of types that support a length,
+ // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
+ match layout.fields {
+ abi::FieldsShape::Array { count, .. } => Ok(count),
+ _ => bug!("len not supported on sized type {:?}", layout.ty),
+ }
+ }
+ }
}
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
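// Aside: for slices and `str` the length lives in the wide-pointer metadata,
// which is what the new `MemPlaceMeta::len` above consults; sized types such
// as arrays get their length from the layout instead (illustration only).
fn slice_len_demo() {
    let xs: &[u8] = &[1, 2, 3];
    assert_eq!(xs.len(), 3); // read from the metadata half of the wide pointer
    assert_eq!(std::mem::size_of::<&[u8]>(), 2 * std::mem::size_of::<usize>());
    let arr = [0u8; 4];
    assert_eq!(arr.len(), 4); // for arrays, the length is part of the type
}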
@@ -57,7 +81,7 @@ pub struct MemPlace<Prov: Provenance = AllocId> {
}
/// A MemPlace with its layout. Constructing it is only possible in this module.
-#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
+#[derive(Clone, Hash, Eq, PartialEq, Debug)]
pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
mplace: MemPlace<Prov>,
pub layout: TyAndLayout<'tcx>,
@@ -68,14 +92,26 @@ pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
pub align: Align,
}
+impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
+ type Target = MemPlace<Prov>;
+ #[inline(always)]
+ fn deref(&self) -> &MemPlace<Prov> {
+ &self.mplace
+ }
+}
+
#[derive(Copy, Clone, Debug)]
pub enum Place<Prov: Provenance = AllocId> {
/// A place referring to a value allocated in the `Memory` system.
Ptr(MemPlace<Prov>),
- /// To support alloc-free locals, we are able to write directly to a local.
+ /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
+ /// where in the local this place is located; if it is `None`, no projection has been applied.
+ /// Such projections are meaningful even if the offset is 0, since they can change layouts.
/// (Without that optimization, we'd just always be a `MemPlace`.)
- Local { frame: usize, local: mir::Local },
+ /// Note that this only stores the frame index, not the thread this frame belongs to -- that is
+ /// implicit. This means a `Place` must never be moved across interpreter thread boundaries!
+ Local { frame: usize, local: mir::Local, offset: Option<Size> },
}
#[derive(Clone, Debug)]
@@ -97,14 +133,6 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for PlaceTy<'tcx, Prov> {
}
}
-impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
- type Target = MemPlace<Prov>;
- #[inline(always)]
- fn deref(&self) -> &MemPlace<Prov> {
- &self.mplace
- }
-}
-
impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
@@ -112,33 +140,23 @@ impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov>
}
}
-impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
- PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
- }
-}
-
-impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
- PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
- }
-}
-
impl<Prov: Provenance> MemPlace<Prov> {
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
MemPlace { ptr, meta: MemPlaceMeta::None }
}
+ #[inline(always)]
+ pub fn from_ptr_with_meta(ptr: Pointer<Option<Prov>>, meta: MemPlaceMeta<Prov>) -> Self {
+ MemPlace { ptr, meta }
+ }
+
/// Adjust the provenance of the main pointer (metadata is unaffected).
pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
MemPlace { ptr: self.ptr.map_provenance(f), ..self }
}
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
- /// This is the inverse of `ref_to_mplace`.
#[inline(always)]
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
match self.meta {
@@ -150,7 +168,8 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
#[inline]
- pub(super) fn offset_with_meta<'tcx>(
+ // Not called `offset_with_meta` to avoid confusion with the trait method.
+ fn offset_with_meta_<'tcx>(
self,
offset: Size,
meta: MemPlaceMeta<Prov>,
@@ -164,19 +183,6 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
}
-impl<Prov: Provenance> Place<Prov> {
- /// Asserts that this points to some local variable.
- /// Returns the frame idx and the variable idx.
- #[inline]
- #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- pub fn assert_local(&self) -> (usize, mir::Local) {
- match self {
- Place::Local { frame, local } => (*frame, *local),
- _ => bug!("assert_local: expected Place::Local, got {:?}", self),
- }
- }
-}
-
impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
/// Produces a MemPlace that works for ZST but nothing else.
/// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
@@ -189,11 +195,39 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
}
- /// Offset the place in memory and change its metadata.
- ///
- /// This can go wrong very easily if you give the wrong layout for the new place!
#[inline]
- pub(crate) fn offset_with_meta(
+ pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
+ MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
+ }
+
+ #[inline]
+ pub fn from_aligned_ptr_with_meta(
+ ptr: Pointer<Option<Prov>>,
+ layout: TyAndLayout<'tcx>,
+ meta: MemPlaceMeta<Prov>,
+ ) -> Self {
+ MPlaceTy {
+ mplace: MemPlace::from_ptr_with_meta(ptr, meta),
+ layout,
+ align: layout.align.abi,
+ }
+ }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+ Ok(self.meta)
+ }
+
+ fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
@@ -201,58 +235,65 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy {
- mplace: self.mplace.offset_with_meta(offset, meta, cx)?,
+ mplace: self.mplace.offset_with_meta_(offset, meta, cx)?,
align: self.align.restrict_for_offset(offset),
layout,
})
}
- /// Offset the place in memory.
- ///
- /// This can go wrong very easily if you give the wrong layout for the new place!
- pub fn offset(
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
- offset: Size,
- layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
- ) -> InterpResult<'tcx, Self> {
- assert!(layout.is_sized());
- self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone().into())
}
+}
- #[inline]
- pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
- MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
+impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
}
- #[inline]
- pub fn from_aligned_ptr_with_meta(
- ptr: Pointer<Option<Prov>>,
- layout: TyAndLayout<'tcx>,
- meta: MemPlaceMeta<Prov>,
- ) -> Self {
- let mut mplace = MemPlace::from_ptr(ptr);
- mplace.meta = meta;
-
- MPlaceTy { mplace, layout, align: layout.align.abi }
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+ ecx.place_meta(self)
}
- #[inline]
- pub(crate) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
- if self.layout.is_unsized() {
- // We need to consult `meta` metadata
- match self.layout.ty.kind() {
- ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_target_usize(cx),
- _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
- }
- } else {
- // Go through the layout. There are lots of types that support a length,
- // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
- match self.layout.fields {
- abi::FieldsShape::Array { count, .. } => Ok(count),
- _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+ fn offset_with_meta(
+ &self,
+ offset: Size,
+ meta: MemPlaceMeta<Prov>,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ Ok(match self.as_mplace_or_local() {
+ Left(mplace) => mplace.offset_with_meta(offset, meta, layout, cx)?.into(),
+ Right((frame, local, old_offset)) => {
+ assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
+ let new_offset = cx
+ .data_layout()
+ .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?;
+ PlaceTy {
+ place: Place::Local {
+ frame,
+ local,
+ offset: Some(Size::from_bytes(new_offset)),
+ },
+ align: self.align.restrict_for_offset(offset),
+ layout,
+ }
}
- }
+ })
+ }
+
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ ecx.place_to_op(self)
}
}
@@ -280,13 +321,15 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
}
}
-impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> {
/// A place is either an mplace or some local.
#[inline]
- pub fn as_mplace_or_local(&self) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
+ pub fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
match **self {
Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
- Place::Local { frame, local } => Right((frame, local)),
+ Place::Local { frame, local, offset } => Right((frame, local, offset)),
}
}
@@ -302,18 +345,80 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
}
}
+pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
+ fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>;
+
+ fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>;
+}
+
+impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
+ {
+ self.as_mplace_or_local()
+ .map_right(|(frame, local, offset)| (frame, local, offset, self.align, self.layout))
+ }
+
+ #[inline(always)]
+ fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
+ ecx.force_allocation(self)
+ }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
+ {
+ Left(self.clone())
+ }
+
+ #[inline(always)]
+ fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
+ Ok(self.clone())
+ }
+}
+
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
Prov: Provenance + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
+ /// Get the metadata of the given place.
+ pub(super) fn place_meta(
+ &self,
+ place: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+ if place.layout.is_unsized() {
+ // For `Place::Local`, the metadata is stored with the local, not the place. So we have
+ // to look that up first.
+ self.place_to_op(place)?.meta()
+ } else {
+ Ok(MemPlaceMeta::None)
+ }
+ }
+
/// Take a value, which represents a (thin or wide) reference, and make it a place.
- /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
+ /// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
///
/// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
/// want to ever use the place for memory access!
- /// Generally prefer `deref_operand`.
+ /// Generally prefer `deref_pointer`.
pub fn ref_to_mplace(
&self,
val: &ImmTy<'tcx, M::Provenance>,
@@ -327,17 +432,29 @@ where
Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
};
- let mplace = MemPlace { ptr: ptr.to_pointer(self)?, meta };
- // When deref'ing a pointer, the *static* alignment given by the type is what matters.
- let align = layout.align.abi;
- Ok(MPlaceTy { mplace, layout, align })
+ // `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced;
+ // we hence can't call `size_and_align_of` since that asserts more validity than we want.
+ Ok(MPlaceTy::from_aligned_ptr_with_meta(ptr.to_pointer(self)?, layout, meta))
+ }
+
+ /// Turn a mplace into a (thin or wide) mutable raw pointer, pointing to the same space.
+ /// `align` information is lost!
+ /// This is the inverse of `ref_to_mplace`.
+ pub fn mplace_to_ref(
+ &self,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+ let imm = mplace.to_ref(self);
+ let layout = self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, mplace.layout.ty))?;
+ Ok(ImmTy::from_immediate(imm, layout))
}
/// Take an operand, representing a pointer, and dereference it to a place.
+ /// Corresponds to the `*` operator in Rust.
#[instrument(skip(self), level = "debug")]
- pub fn deref_operand(
+ pub fn deref_pointer(
&self,
- src: &OpTy<'tcx, M::Provenance>,
+ src: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
@@ -347,41 +464,44 @@ where
}
let mplace = self.ref_to_mplace(&val)?;
- self.check_mplace(mplace)?;
+ self.check_mplace(&mplace)?;
Ok(mplace)
}
#[inline]
pub(super) fn get_place_alloc(
&self,
- place: &MPlaceTy<'tcx, M::Provenance>,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
{
- assert!(place.layout.is_sized());
- assert!(!place.meta.has_meta());
- let size = place.layout.size;
- self.get_ptr_alloc(place.ptr, size, place.align)
+ let (size, _align) = self
+ .size_and_align_of_mplace(&mplace)?
+ .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+ // Due to packed places, only `mplace.align` matters.
+ self.get_ptr_alloc(mplace.ptr, size, mplace.align)
}
#[inline]
pub(super) fn get_place_alloc_mut(
&mut self,
- place: &MPlaceTy<'tcx, M::Provenance>,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
{
- assert!(place.layout.is_sized());
- assert!(!place.meta.has_meta());
- let size = place.layout.size;
- self.get_ptr_alloc_mut(place.ptr, size, place.align)
+ let (size, _align) = self
+ .size_and_align_of_mplace(&mplace)?
+ .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+ // Due to packed places, only `mplace.align` matters.
+ self.get_ptr_alloc_mut(mplace.ptr, size, mplace.align)
}
/// Check if this mplace is dereferenceable and sufficiently aligned.
- pub fn check_mplace(&self, mplace: MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
- let (size, align) = self
+ pub fn check_mplace(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ let (size, _align) = self
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
- assert!(mplace.align <= align, "dynamic alignment less strict than static one?");
- let align = if M::enforce_alignment(self).should_check() { align } else { Align::ONE };
+ // Due to packed places, only `mplace.align` matters.
+ let align =
+ if M::enforce_alignment(self).should_check() { mplace.align } else { Align::ONE };
self.check_ptr_access_align(mplace.ptr, size, align, CheckInAllocMsg::DerefTest)?;
Ok(())
}
@@ -418,7 +538,7 @@ where
local: mir::Local,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let layout = self.layout_of_local(&self.stack()[frame], local, None)?;
- let place = Place::Local { frame, local };
+ let place = Place::Local { frame, local, offset: None };
Ok(PlaceTy { place, layout, align: layout.align.abi })
}
@@ -426,13 +546,13 @@ where
/// place; for reading, a more efficient alternative is `eval_place_to_op`.
#[instrument(skip(self), level = "debug")]
pub fn eval_place(
- &mut self,
+ &self,
mir_place: mir::Place<'tcx>,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?;
// Using `try_fold` turned out to be bad for performance, hence the loop.
for elem in mir_place.projection.iter() {
- place = self.place_projection(&place, elem)?
+ place = self.project(&place, elem)?
}
trace!("{:?}", self.dump_place(place.place));
@@ -459,13 +579,13 @@ where
pub fn write_immediate(
&mut self,
src: Immediate<M::Provenance>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_immediate_no_validate(src, dest)?;
- if M::enforce_validity(self, dest.layout) {
+ if M::enforce_validity(self, dest.layout()) {
// Data got changed, better make sure it matches the type!
- self.validate_operand(&self.place_to_op(dest)?)?;
+ self.validate_operand(&dest.to_op(self)?)?;
}
Ok(())
@@ -476,7 +596,7 @@ where
pub fn write_scalar(
&mut self,
val: impl Into<Scalar<M::Provenance>>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_immediate(Immediate::Scalar(val.into()), dest)
}
@@ -486,7 +606,7 @@ where
pub fn write_pointer(
&mut self,
ptr: impl Into<Pointer<Option<M::Provenance>>>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
}
@@ -497,32 +617,63 @@ where
fn write_immediate_no_validate(
&mut self,
src: Immediate<M::Provenance>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- assert!(dest.layout.is_sized(), "Cannot write unsized data");
- trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+ assert!(dest.layout().is_sized(), "Cannot write unsized immediate data");
// See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
// but not factored as a separate function.
- let mplace = match dest.place {
- Place::Local { frame, local } => {
- match M::access_local_mut(self, frame, local)? {
- Operand::Immediate(local) => {
- // Local can be updated in-place.
- *local = src;
- return Ok(());
- }
- Operand::Indirect(mplace) => {
- // The local is in memory, go on below.
- *mplace
+ let mplace = match dest.as_mplace_or_local() {
+ Right((frame, local, offset, align, layout)) => {
+ if offset.is_some() {
+ // This has been projected to a part of this local. We could have complicated
+ // logic to still keep this local as an `Operand`... but it's much easier to
+ // just fall back to the indirect path.
+ dest.force_mplace(self)?
+ } else {
+ match M::access_local_mut(self, frame, local)? {
+ Operand::Immediate(local_val) => {
+ // Local can be updated in-place.
+ *local_val = src;
+                            // Double-check that the value we are storing and the local fit each other.
+ // (*After* doing the update for borrow checker reasons.)
+ if cfg!(debug_assertions) {
+ let local_layout =
+ self.layout_of_local(&self.stack()[frame], local, None)?;
+ match (src, local_layout.abi) {
+ (Immediate::Scalar(scalar), Abi::Scalar(s)) => {
+ assert_eq!(scalar.size(), s.size(self))
+ }
+ (
+ Immediate::ScalarPair(a_val, b_val),
+ Abi::ScalarPair(a, b),
+ ) => {
+ assert_eq!(a_val.size(), a.size(self));
+ assert_eq!(b_val.size(), b.size(self));
+ }
+ (Immediate::Uninit, _) => {}
+ (src, abi) => {
+ bug!(
+ "value {src:?} cannot be written into local with type {} (ABI {abi:?})",
+ local_layout.ty
+ )
+ }
+ };
+ }
+ return Ok(());
+ }
+ Operand::Indirect(mplace) => {
+ // The local is in memory, go on below.
+ MPlaceTy { mplace: *mplace, align, layout }
+ }
}
}
}
- Place::Ptr(mplace) => mplace, // already referring to memory
+ Left(mplace) => mplace, // already referring to memory
};
// This is already in memory, write there.
- self.write_immediate_to_mplace_no_validate(src, dest.layout, dest.align, mplace)
+ self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.align, mplace.mplace)
}
/// Write an immediate to memory.
@@ -541,14 +692,17 @@ where
// wrong type.
let tcx = *self.tcx;
- let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })? else {
+ let Some(mut alloc) =
+ self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })?
+ else {
// zero-sized access
return Ok(());
};
match value {
Immediate::Scalar(scalar) => {
- let Abi::Scalar(s) = layout.abi else { span_bug!(
+ let Abi::Scalar(s) = layout.abi else {
+ span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
)
@@ -561,7 +715,8 @@ where
// We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
- let Abi::ScalarPair(a, b) = layout.abi else { span_bug!(
+ let Abi::ScalarPair(a, b) = layout.abi else {
+ span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
layout
@@ -582,18 +737,29 @@ where
}
}
- pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ pub fn write_uninit(
+ &mut self,
+ dest: &impl Writeable<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
let mplace = match dest.as_mplace_or_local() {
Left(mplace) => mplace,
- Right((frame, local)) => {
- match M::access_local_mut(self, frame, local)? {
- Operand::Immediate(local) => {
- *local = Immediate::Uninit;
- return Ok(());
- }
- Operand::Indirect(mplace) => {
- // The local is in memory, go on below.
- MPlaceTy { mplace: *mplace, layout: dest.layout, align: dest.align }
+ Right((frame, local, offset, align, layout)) => {
+ if offset.is_some() {
+ // This has been projected to a part of this local. We could have complicated
+ // logic to still keep this local as an `Operand`... but it's much easier to
+ // just fall back to the indirect path.
+ // FIXME: share the logic with `write_immediate_no_validate`.
+ dest.force_mplace(self)?
+ } else {
+ match M::access_local_mut(self, frame, local)? {
+ Operand::Immediate(local) => {
+ *local = Immediate::Uninit;
+ return Ok(());
+ }
+ Operand::Indirect(mplace) => {
+ // The local is in memory, go on below.
+ MPlaceTy { mplace: *mplace, layout, align }
+ }
}
}
}
@@ -612,15 +778,15 @@ where
#[instrument(skip(self), level = "debug")]
pub fn copy_op(
&mut self,
- src: &OpTy<'tcx, M::Provenance>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ src: &impl Readable<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
allow_transmute: bool,
) -> InterpResult<'tcx> {
self.copy_op_no_validate(src, dest, allow_transmute)?;
- if M::enforce_validity(self, dest.layout) {
+ if M::enforce_validity(self, dest.layout()) {
// Data got changed, better make sure it matches the type!
- self.validate_operand(&self.place_to_op(dest)?)?;
+ self.validate_operand(&dest.to_op(self)?)?;
}
Ok(())
@@ -633,20 +799,20 @@ where
#[instrument(skip(self), level = "debug")]
fn copy_op_no_validate(
&mut self,
- src: &OpTy<'tcx, M::Provenance>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ src: &impl Readable<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
allow_transmute: bool,
) -> InterpResult<'tcx> {
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
let layout_compat =
- mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
+ mir_assign_valid_types(*self.tcx, self.param_env, src.layout(), dest.layout());
if !allow_transmute && !layout_compat {
span_bug!(
self.cur_span(),
"type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
- src.layout.ty,
- dest.layout.ty,
+ src.layout().ty,
+ dest.layout().ty,
);
}
@@ -659,13 +825,13 @@ where
// actually sized, due to a trivially false where-clause
// predicate like `where Self: Sized` with `Self = dyn Trait`.
// See #102553 for an example of such a predicate.
- if src.layout.is_unsized() {
- throw_inval!(SizeOfUnsizedType(src.layout.ty));
+ if src.layout().is_unsized() {
+ throw_inval!(ConstPropNonsense);
}
- if dest.layout.is_unsized() {
- throw_inval!(SizeOfUnsizedType(dest.layout.ty));
+ if dest.layout().is_unsized() {
+ throw_inval!(ConstPropNonsense);
}
- assert_eq!(src.layout.size, dest.layout.size);
+ assert_eq!(src.layout().size, dest.layout().size);
// Yay, we got a value that we can write directly.
return if layout_compat {
self.write_immediate_no_validate(*src_val, dest)
@@ -674,10 +840,10 @@ where
// loaded using the offsets defined by `src.layout`. When we put this back into
// the destination, we have to use the same offsets! So (a) we make sure we
// write back to memory, and (b) we use `dest` *with the source layout*.
- let dest_mem = self.force_allocation(dest)?;
+ let dest_mem = dest.force_mplace(self)?;
self.write_immediate_to_mplace_no_validate(
*src_val,
- src.layout,
+ src.layout(),
dest_mem.align,
*dest_mem,
)
@@ -686,9 +852,9 @@ where
Left(mplace) => mplace,
};
// Slow path, this does not fit into an immediate. Just memcpy.
- trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+ trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout().ty);
- let dest = self.force_allocation(&dest)?;
+ let dest = dest.force_mplace(self)?;
let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
};
@@ -720,8 +886,8 @@ where
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let mplace = match place.place {
- Place::Local { frame, local } => {
- match M::access_local_mut(self, frame, local)? {
+ Place::Local { frame, local, offset } => {
+ let whole_local = match M::access_local_mut(self, frame, local)? {
&mut Operand::Immediate(local_val) => {
// We need to make an allocation.
@@ -734,10 +900,11 @@ where
throw_unsup_format!("unsized locals are not supported");
}
let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
+ // Preserve old value. (As an optimization, we can skip this if it was uninit.)
if !matches!(local_val, Immediate::Uninit) {
- // Preserve old value. (As an optimization, we can skip this if it was uninit.)
- // We don't have to validate as we can assume the local
- // was already valid for its type.
+ // We don't have to validate as we can assume the local was already
+ // valid for its type. We must not use any part of `place` here, that
+ // could be a projection to a part of the local!
self.write_immediate_to_mplace_no_validate(
local_val,
local_layout,
@@ -745,29 +912,48 @@ where
mplace,
)?;
}
- // Now we can call `access_mut` again, asserting it goes well,
- // and actually overwrite things.
+ // Now we can call `access_mut` again, asserting it goes well, and actually
+ // overwrite things. This points to the entire allocation, not just the part
+ // the place refers to, i.e. we do this before we apply `offset`.
*M::access_local_mut(self, frame, local).unwrap() =
Operand::Indirect(mplace);
mplace
}
&mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
+ };
+ if let Some(offset) = offset {
+ whole_local.offset_with_meta_(offset, MemPlaceMeta::None, self)?
+ } else {
+ // Preserve wide place metadata, do not call `offset`.
+ whole_local
}
}
Place::Ptr(mplace) => mplace,
};
- // Return with the original layout, so that the caller can go on
+ // Return with the original layout and align, so that the caller can go on
Ok(MPlaceTy { mplace, layout: place.layout, align: place.align })
}
+ pub fn allocate_dyn(
+ &mut self,
+ layout: TyAndLayout<'tcx>,
+ kind: MemoryKind<M::MemoryKind>,
+ meta: MemPlaceMeta<M::Provenance>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let Some((size, align)) = self.size_and_align_of(&meta, &layout)? else {
+ span_bug!(self.cur_span(), "cannot allocate space for `extern` type, size is not known")
+ };
+ let ptr = self.allocate_ptr(size, align, kind)?;
+ Ok(MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), layout, meta))
+ }
+
pub fn allocate(
&mut self,
layout: TyAndLayout<'tcx>,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
assert!(layout.is_sized());
- let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
- Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+ self.allocate_dyn(layout, kind, MemPlaceMeta::None)
}
/// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
@@ -798,10 +984,10 @@ where
operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- self.write_uninit(&dest)?;
+ self.write_uninit(dest)?;
let (variant_index, variant_dest, active_field_index) = match *kind {
mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
- let variant_dest = self.place_downcast(&dest, variant_index)?;
+ let variant_dest = self.project_downcast(dest, variant_index)?;
(variant_index, variant_dest, active_field_index)
}
_ => (FIRST_VARIANT, dest.clone(), None),
@@ -811,11 +997,11 @@ where
}
for (field_index, operand) in operands.iter_enumerated() {
let field_index = active_field_index.unwrap_or(field_index);
- let field_dest = self.place_field(&variant_dest, field_index.as_usize())?;
+ let field_dest = self.project_field(&variant_dest, field_index.as_usize())?;
let op = self.eval_operand(operand, Some(field_dest.layout))?;
self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
}
- self.write_discriminant(variant_index, &dest)
+ self.write_discriminant(variant_index, dest)
}
pub fn raw_const_to_mplace(
@@ -851,22 +1037,24 @@ where
Ok((mplace, vtable))
}
- /// Turn an operand with a `dyn* Trait` type into an operand with the actual dynamic type.
- /// Aso returns the vtable.
- pub(super) fn unpack_dyn_star(
+    /// Turn a value of `dyn* Trait` type into a value with the actual dynamic type.
+ /// Also returns the vtable.
+ pub(super) fn unpack_dyn_star<P: Projectable<'tcx, M::Provenance>>(
&self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (OpTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
+ val: &P,
+ ) -> InterpResult<'tcx, (P, Pointer<Option<M::Provenance>>)> {
assert!(
- matches!(op.layout.ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+ matches!(val.layout().ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
"`unpack_dyn_star` only makes sense on `dyn*` types"
);
- let data = self.operand_field(&op, 0)?;
- let vtable = self.operand_field(&op, 1)?;
- let vtable = self.read_pointer(&vtable)?;
+ let data = self.project_field(val, 0)?;
+ let vtable = self.project_field(val, 1)?;
+ let vtable = self.read_pointer(&vtable.to_op(self)?)?;
let (ty, _) = self.get_ptr_vtable(vtable)?;
let layout = self.layout_of(ty)?;
- let data = data.transmute(layout);
+ // `data` is already the right thing but has the wrong type. So we transmute it, by
+ // projecting with offset 0.
+ let data = data.transmute(layout, self)?;
Ok((data, vtable))
}
}
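
The place.rs changes above extend `Place::Local` with an `offset: Option<Size>`, and `offset_with_meta` composes a new projection offset with whatever offset is already stored on the local. The following is a minimal, self-contained sketch of that composition rule only; `ToyPlace` and `Size` are illustrative stand-ins, not the rustc types, and nothing here is part of the patch.

#[derive(Clone, Copy, Debug)]
struct Size(u64);

#[derive(Clone, Copy, Debug)]
enum ToyPlace {
    // A place living in a "local", possibly already projected to a sub-range.
    Local { local: usize, offset: Option<Size> },
    // A place backed by memory at an absolute address.
    Ptr { addr: u64 },
}

impl ToyPlace {
    // Analogue of the local arm of `offset_with_meta`: add the new offset onto
    // any offset this local projection already carries.
    fn offset(self, extra: Size) -> ToyPlace {
        match self {
            ToyPlace::Local { local, offset } => {
                let base = offset.map_or(0, |s| s.0);
                ToyPlace::Local { local, offset: Some(Size(base + extra.0)) }
            }
            ToyPlace::Ptr { addr } => ToyPlace::Ptr { addr: addr + extra.0 },
        }
    }
}

fn main() {
    let p = ToyPlace::Local { local: 3, offset: None };
    // Two successive projections accumulate into a single 12-byte offset.
    let q = p.offset(Size(8)).offset(Size(4));
    println!("{q:?}");
}
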
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index d7d31fe18..882097ad2 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -7,18 +7,70 @@
//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
//! implement the logic on OpTy, and MPlaceTy calls that.
-use either::{Left, Right};
-
use rustc_middle::mir;
use rustc_middle::ty;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
-use rustc_target::abi::{self, Abi, VariantIdx};
+use rustc_middle::ty::TyCtxt;
+use rustc_target::abi::HasDataLayout;
+use rustc_target::abi::Size;
+use rustc_target::abi::{self, VariantIdx};
+
+use super::{InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar};
-use super::{
- ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, PlaceTy,
- Provenance, Scalar,
-};
+/// A thing that we can project into, and that has a layout.
+pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
+ /// Get the layout.
+ fn layout(&self) -> TyAndLayout<'tcx>;
+
+ /// Get the metadata of a wide value.
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>>;
+
+ fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, u64> {
+ self.meta(ecx)?.len(self.layout(), ecx)
+ }
+
+ /// Offset the value by the given amount, replacing the layout and metadata.
+ fn offset_with_meta(
+ &self,
+ offset: Size,
+ meta: MemPlaceMeta<Prov>,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self>;
+
+ fn offset(
+ &self,
+ offset: Size,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ assert!(layout.is_sized());
+ self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ }
+
+ fn transmute(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ assert_eq!(self.layout().size, layout.size);
+ self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, cx)
+ }
+
+ /// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
+ /// reading from this thing.
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+}
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
@@ -26,167 +78,83 @@ where
Prov: Provenance + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
- //# Field access
-
/// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
/// always possible without allocating, so it can take `&self`. Also return the field's layout.
- /// This supports both struct and array fields.
+ /// This supports both struct and array fields, but not slices!
///
/// This also works for arrays, but then the `usize` index type is restricting.
/// For indexing into arrays, use `mplace_index`.
- pub fn mplace_field(
+ pub fn project_field<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &MPlaceTy<'tcx, M::Provenance>,
+ base: &P,
field: usize,
- ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
- let offset = base.layout.fields.offset(field);
- let field_layout = base.layout.field(self, field);
+ ) -> InterpResult<'tcx, P> {
+ // Slices nominally have length 0, so they will panic somewhere in `fields.offset`.
+ debug_assert!(
+ !matches!(base.layout().ty.kind(), ty::Slice(..)),
+ "`field` projection called on a slice -- call `index` projection instead"
+ );
+ let offset = base.layout().fields.offset(field);
+ let field_layout = base.layout().field(self, field);
// Offset may need adjustment for unsized fields.
let (meta, offset) = if field_layout.is_unsized() {
+ if base.layout().is_sized() {
+ // An unsized field of a sized type? Sure...
+ // But const-prop actually feeds us such nonsense MIR! (see test `const_prop/issue-86351.rs`)
+ throw_inval!(ConstPropNonsense);
+ }
+ let base_meta = base.meta(self)?;
// Re-use parent metadata to determine dynamic field layout.
// With custom DSTS, this *will* execute user-defined code, but the same
// happens at run-time so that's okay.
- match self.size_and_align_of(&base.meta, &field_layout)? {
- Some((_, align)) => (base.meta, offset.align_to(align)),
+ match self.size_and_align_of(&base_meta, &field_layout)? {
+ Some((_, align)) => (base_meta, offset.align_to(align)),
None => {
// For unsized types with an extern type tail we perform no adjustments.
// NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
- assert!(matches!(base.meta, MemPlaceMeta::None));
- (base.meta, offset)
+ assert!(matches!(base_meta, MemPlaceMeta::None));
+ (base_meta, offset)
}
}
} else {
- // base.meta could be present; we might be accessing a sized field of an unsized
+ // base_meta could be present; we might be accessing a sized field of an unsized
// struct.
(MemPlaceMeta::None, offset)
};
- // We do not look at `base.layout.align` nor `field_layout.align`, unlike
- // codegen -- mostly to see if we can get away with that
base.offset_with_meta(offset, meta, field_layout, self)
}
- /// Gets the place of a field inside the place, and also the field's type.
- /// Just a convenience function, but used quite a bit.
- /// This is the only projection that might have a side-effect: We cannot project
- /// into the field of a local `ScalarPair`, we have to first allocate it.
- pub fn place_field(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- field: usize,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- // FIXME: We could try to be smarter and avoid allocation for fields that span the
- // entire place.
- let base = self.force_allocation(base)?;
- Ok(self.mplace_field(&base, field)?.into())
- }
-
- pub fn operand_field(
+ /// Downcasting to an enum variant.
+ pub fn project_downcast<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &OpTy<'tcx, M::Provenance>,
- field: usize,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- let base = match base.as_mplace_or_imm() {
- Left(ref mplace) => {
- // We can reuse the mplace field computation logic for indirect operands.
- let field = self.mplace_field(mplace, field)?;
- return Ok(field.into());
- }
- Right(value) => value,
- };
-
- let field_layout = base.layout.field(self, field);
- let offset = base.layout.fields.offset(field);
- // This makes several assumptions about what layouts we will encounter; we match what
- // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
- let field_val: Immediate<_> = match (*base, base.layout.abi) {
- // if the entire value is uninit, then so is the field (can happen in ConstProp)
- (Immediate::Uninit, _) => Immediate::Uninit,
- // the field contains no information, can be left uninit
- _ if field_layout.is_zst() => Immediate::Uninit,
- // the field covers the entire type
- _ if field_layout.size == base.layout.size => {
- assert!(match (base.layout.abi, field_layout.abi) {
- (Abi::Scalar(..), Abi::Scalar(..)) => true,
- (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
- _ => false,
- });
- assert!(offset.bytes() == 0);
- *base
- }
- // extract fields from types with `ScalarPair` ABI
- (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
- assert!(matches!(field_layout.abi, Abi::Scalar(..)));
- Immediate::from(if offset.bytes() == 0 {
- debug_assert_eq!(field_layout.size, a.size(self));
- a_val
- } else {
- debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
- debug_assert_eq!(field_layout.size, b.size(self));
- b_val
- })
- }
- // everything else is a bug
- _ => span_bug!(
- self.cur_span(),
- "invalid field access on immediate {}, layout {:#?}",
- base,
- base.layout
- ),
- };
-
- Ok(ImmTy::from_immediate(field_val, field_layout).into())
- }
-
- //# Downcasting
-
- pub fn mplace_downcast(
- &self,
- base: &MPlaceTy<'tcx, M::Provenance>,
+ base: &P,
variant: VariantIdx,
- ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ ) -> InterpResult<'tcx, P> {
+ assert!(!base.meta(self)?.has_meta());
// Downcasts only change the layout.
// (In particular, no check about whether this is even the active variant -- that's by design,
// see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
- assert!(!base.meta.has_meta());
- let mut base = *base;
- base.layout = base.layout.for_variant(self, variant);
- Ok(base)
- }
-
- pub fn place_downcast(
- &self,
- base: &PlaceTy<'tcx, M::Provenance>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- // Downcast just changes the layout
- let mut base = base.clone();
- base.layout = base.layout.for_variant(self, variant);
- Ok(base)
- }
-
- pub fn operand_downcast(
- &self,
- base: &OpTy<'tcx, M::Provenance>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- // Downcast just changes the layout
- let mut base = base.clone();
- base.layout = base.layout.for_variant(self, variant);
- Ok(base)
+ // So we just "offset" by 0.
+ let layout = base.layout().for_variant(self, variant);
+ if layout.abi.is_uninhabited() {
+ // `read_discriminant` should have excluded uninhabited variants... but ConstProp calls
+ // us on dead code.
+ throw_inval!(ConstPropNonsense)
+ }
+ // This cannot be `transmute` as variants *can* have a smaller size than the entire enum.
+ base.offset(Size::ZERO, layout, self)
}
- //# Slice indexing
-
- #[inline(always)]
- pub fn operand_index(
+ /// Compute the offset and field layout for accessing the given index.
+ pub fn project_index<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &OpTy<'tcx, M::Provenance>,
+ base: &P,
index: u64,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ ) -> InterpResult<'tcx, P> {
// Not using the layout method because we want to compute on u64
- match base.layout.fields {
+ let (offset, field_layout) = match base.layout().fields {
abi::FieldsShape::Array { stride, count: _ } => {
// `count` is nonsense for slices, use the dynamic length instead.
let len = base.len(self)?;
@@ -196,63 +164,26 @@ where
}
let offset = stride * index; // `Size` multiplication
// All fields have the same layout.
- let field_layout = base.layout.field(self, 0);
- base.offset(offset, field_layout, self)
+ let field_layout = base.layout().field(self, 0);
+ (offset, field_layout)
}
_ => span_bug!(
self.cur_span(),
"`mplace_index` called on non-array type {:?}",
- base.layout.ty
+ base.layout().ty
),
- }
- }
-
- /// Iterates over all fields of an array. Much more efficient than doing the
- /// same by repeatedly calling `operand_index`.
- pub fn operand_array_fields<'a>(
- &self,
- base: &'a OpTy<'tcx, Prov>,
- ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Prov>>> + 'a> {
- let len = base.len(self)?; // also asserts that we have a type where this makes sense
- let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
- span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
};
- let field_layout = base.layout.field(self, 0);
- let dl = &self.tcx.data_layout;
- // `Size` multiplication
- Ok((0..len).map(move |i| base.offset(stride * i, field_layout, dl)))
- }
-
- /// Index into an array.
- pub fn mplace_index(
- &self,
- base: &MPlaceTy<'tcx, M::Provenance>,
- index: u64,
- ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
- Ok(self.operand_index(&base.into(), index)?.assert_mem_place())
- }
- pub fn place_index(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- index: u64,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- // There's not a lot we can do here, since we cannot have a place to a part of a local. If
- // we are accessing the only element of a 1-element array, it's still the entire local...
- // that doesn't seem worth it.
- let base = self.force_allocation(base)?;
- Ok(self.mplace_index(&base, index)?.into())
+ base.offset(offset, field_layout, self)
}
- //# ConstantIndex support
-
- fn operand_constant_index(
+ fn project_constant_index<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &OpTy<'tcx, M::Provenance>,
+ base: &P,
offset: u64,
min_length: u64,
from_end: bool,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ ) -> InterpResult<'tcx, P> {
let n = base.len(self)?;
if n < min_length {
// This can only be reached in ConstProp and non-rustc-MIR.
@@ -267,32 +198,38 @@ where
offset
};
- self.operand_index(base, index)
+ self.project_index(base, index)
}
- fn place_constant_index(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- offset: u64,
- min_length: u64,
- from_end: bool,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- let base = self.force_allocation(base)?;
- Ok(self
- .operand_constant_index(&base.into(), offset, min_length, from_end)?
- .assert_mem_place()
- .into())
+ /// Iterates over all fields of an array. Much more efficient than doing the
+ /// same by repeatedly calling `operand_index`.
+ pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>(
+ &self,
+ base: &'a P,
+ ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, P>> + 'a>
+ where
+ 'tcx: 'a,
+ {
+ let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
+            span_bug!(self.cur_span(), "project_array_fields: expected an array layout");
+ };
+ let len = base.len(self)?;
+ let field_layout = base.layout().field(self, 0);
+ let tcx: TyCtxt<'tcx> = *self.tcx;
+ // `Size` multiplication
+ Ok((0..len).map(move |i| {
+ base.offset_with_meta(stride * i, MemPlaceMeta::None, field_layout, &tcx)
+ }))
}
- //# Subslicing
-
- fn operand_subslice(
+ /// Subslicing
+ fn project_subslice<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &OpTy<'tcx, M::Provenance>,
+ base: &P,
from: u64,
to: u64,
from_end: bool,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ ) -> InterpResult<'tcx, P> {
let len = base.len(self)?; // also asserts that we have a type where this makes sense
let actual_to = if from_end {
if from.checked_add(to).map_or(true, |to| to > len) {
@@ -306,16 +243,20 @@ where
// Not using layout method because that works with usize, and does not work with slices
// (that have count 0 in their layout).
- let from_offset = match base.layout.fields {
+ let from_offset = match base.layout().fields {
abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
_ => {
- span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
+ span_bug!(
+ self.cur_span(),
+ "unexpected layout of index access: {:#?}",
+ base.layout()
+ )
}
};
// Compute meta and new layout
let inner_len = actual_to.checked_sub(from).unwrap();
- let (meta, ty) = match base.layout.ty.kind() {
+ let (meta, ty) = match base.layout().ty.kind() {
// It is not nice to match on the type, but that seems to be the only way to
// implement this.
ty::Array(inner, _) => {
@@ -323,85 +264,43 @@ where
}
ty::Slice(..) => {
let len = Scalar::from_target_usize(inner_len, self);
- (MemPlaceMeta::Meta(len), base.layout.ty)
+ (MemPlaceMeta::Meta(len), base.layout().ty)
}
_ => {
- span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
+ span_bug!(
+ self.cur_span(),
+ "cannot subslice non-array type: `{:?}`",
+ base.layout().ty
+ )
}
};
let layout = self.layout_of(ty)?;
- base.offset_with_meta(from_offset, meta, layout, self)
- }
-
- pub fn place_subslice(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- from: u64,
- to: u64,
- from_end: bool,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- let base = self.force_allocation(base)?;
- Ok(self.operand_subslice(&base.into(), from, to, from_end)?.assert_mem_place().into())
- }
-
- //# Applying a general projection
- /// Projects into a place.
- #[instrument(skip(self), level = "trace")]
- pub fn place_projection(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- proj_elem: mir::PlaceElem<'tcx>,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- use rustc_middle::mir::ProjectionElem::*;
- Ok(match proj_elem {
- OpaqueCast(ty) => {
- let mut place = base.clone();
- place.layout = self.layout_of(ty)?;
- place
- }
- Field(field, _) => self.place_field(base, field.index())?,
- Downcast(_, variant) => self.place_downcast(base, variant)?,
- Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
- Index(local) => {
- let layout = self.layout_of(self.tcx.types.usize)?;
- let n = self.local_to_op(self.frame(), local, Some(layout))?;
- let n = self.read_target_usize(&n)?;
- self.place_index(base, n)?
- }
- ConstantIndex { offset, min_length, from_end } => {
- self.place_constant_index(base, offset, min_length, from_end)?
- }
- Subslice { from, to, from_end } => self.place_subslice(base, from, to, from_end)?,
- })
+ base.offset_with_meta(from_offset, meta, layout, self)
}
+ /// Applying a general projection
#[instrument(skip(self), level = "trace")]
- pub fn operand_projection(
- &self,
- base: &OpTy<'tcx, M::Provenance>,
- proj_elem: mir::PlaceElem<'tcx>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ pub fn project<P>(&self, base: &P, proj_elem: mir::PlaceElem<'tcx>) -> InterpResult<'tcx, P>
+ where
+ P: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>> + std::fmt::Debug,
+ {
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
- OpaqueCast(ty) => {
- let mut op = base.clone();
- op.layout = self.layout_of(ty)?;
- op
- }
- Field(field, _) => self.operand_field(base, field.index())?,
- Downcast(_, variant) => self.operand_downcast(base, variant)?,
- Deref => self.deref_operand(base)?.into(),
+ OpaqueCast(ty) => base.transmute(self.layout_of(ty)?, self)?,
+ Field(field, _) => self.project_field(base, field.index())?,
+ Downcast(_, variant) => self.project_downcast(base, variant)?,
+ Deref => self.deref_pointer(&base.to_op(self)?)?.into(),
Index(local) => {
let layout = self.layout_of(self.tcx.types.usize)?;
let n = self.local_to_op(self.frame(), local, Some(layout))?;
let n = self.read_target_usize(&n)?;
- self.operand_index(base, n)?
+ self.project_index(base, n)?
}
ConstantIndex { offset, min_length, from_end } => {
- self.operand_constant_index(base, offset, min_length, from_end)?
+ self.project_constant_index(base, offset, min_length, from_end)?
}
- Subslice { from, to, from_end } => self.operand_subslice(base, from, to, from_end)?,
+ Subslice { from, to, from_end } => self.project_subslice(base, from, to, from_end)?,
})
}
}
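
The projection.rs rewrite above folds the separate `mplace_*`, `operand_*`, and `place_*` projection helpers into generic routines over the new `Projectable` trait. The sketch below shows only the shape of that pattern with toy types (`Layout`, `MemPlace`, and the trait here are simplified stand-ins, not the rustc definitions): a single field-projection helper that needs nothing beyond `layout()` and `offset()`.

#[derive(Clone, Copy, Debug)]
struct Layout {
    size: u64,
    field_offsets: [u64; 2],
}

trait Projectable: Sized {
    fn layout(&self) -> Layout;
    fn offset(&self, offset: u64, new_layout: Layout) -> Self;
}

#[derive(Clone, Copy, Debug)]
struct MemPlace {
    addr: u64,
    layout: Layout,
}

impl Projectable for MemPlace {
    fn layout(&self) -> Layout {
        self.layout
    }
    fn offset(&self, offset: u64, new_layout: Layout) -> Self {
        MemPlace { addr: self.addr + offset, layout: new_layout }
    }
}

// One generic field projection instead of per-carrier copies of the logic.
fn project_field<P: Projectable>(base: &P, field: usize, field_layout: Layout) -> P {
    let offset = base.layout().field_offsets[field];
    assert!(offset + field_layout.size <= base.layout().size);
    base.offset(offset, field_layout)
}

fn main() {
    let pair = Layout { size: 16, field_offsets: [0, 8] };
    let fld = Layout { size: 8, field_offsets: [0, 0] };
    let base = MemPlace { addr: 0x1000, layout: pair };
    let second = project_field(&base, 1, fld);
    println!("{second:?}"); // addr = 0x1008
}
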
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 619da8abb..0740894a4 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -8,7 +8,7 @@ use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::LayoutOf;
-use super::{ImmTy, InterpCx, Machine};
+use super::{ImmTy, InterpCx, Machine, Projectable};
use crate::util;
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
@@ -178,7 +178,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The operand always has the same type as the result.
let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
let val = self.unary_op(un_op, &val)?;
- assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
+ assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
self.write_immediate(*val, &dest)?;
}
@@ -197,8 +197,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.get_place_alloc_mut(&dest)?;
} else {
// Write the src to the first element.
- let first = self.mplace_field(&dest, 0)?;
- self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;
+ let first = self.project_index(&dest, 0)?;
+ self.copy_op(&src, &first, /*allow_transmute*/ false)?;
// This is performance-sensitive code for big static/const arrays! So we
// avoid writing each operand individually and instead just make many copies
@@ -208,13 +208,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let rest_ptr = first_ptr.offset(elem_size, self)?;
// For the alignment of `rest_ptr`, we crucially do *not* use `first.align` as
// that place might be more aligned than its type mandates (a `u8` array could
- // be 4-aligned if it sits at the right spot in a struct). Instead we use
- // `first.layout.align`, i.e., the alignment given by the type.
+ // be 4-aligned if it sits at the right spot in a struct). We have to also factor
+ // in element size.
self.mem_copy_repeatedly(
first_ptr,
- first.align,
+ dest.align,
rest_ptr,
- first.layout.align.abi,
+ dest.align.restrict_for_offset(elem_size),
elem_size,
length - 1,
/*nonoverlapping:*/ true,
@@ -224,8 +224,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Len(place) => {
let src = self.eval_place(place)?;
- let op = self.place_to_op(&src)?;
- let len = op.len(self)?;
+ let len = src.len(self)?;
self.write_scalar(Scalar::from_target_usize(len, self), &dest)?;
}
@@ -248,7 +247,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
AddressOf(_, place) => {
// Figure out whether this is an addr_of of an already raw place.
- let place_base_raw = if place.has_deref() {
+ let place_base_raw = if place.is_indirect_first_projection() {
let ty = self.frame().body.local_decls[place.local].ty;
ty.is_unsafe_ptr()
} else {
@@ -270,12 +269,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty)?;
let layout = self.layout_of(ty)?;
if let mir::NullOp::SizeOf | mir::NullOp::AlignOf = null_op && layout.is_unsized() {
- // FIXME: This should be a span_bug (#80742)
- self.tcx.sess.delay_span_bug(
+ span_bug!(
self.frame().current_span(),
- format!("{null_op:?} MIR operator called for unsized type {ty}"),
+ "{null_op:?} MIR operator called for unsized type {ty}",
);
- throw_inval!(SizeOfUnsizedType(ty));
}
let val = match null_op {
mir::NullOp::SizeOf => layout.size.bytes(),
@@ -302,8 +299,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Discriminant(place) => {
let op = self.eval_place_to_op(place, None)?;
- let discr_val = self.read_discriminant(&op)?.0;
- self.write_scalar(discr_val, &dest)?;
+ let variant = self.read_discriminant(&op)?;
+ let discr = self.discriminant_for_variant(op.layout, variant)?;
+ self.write_scalar(discr, &dest)?;
}
}
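
In the Repeat hunk above, the bulk copy now uses `dest.align` for the source element and `dest.align.restrict_for_offset(elem_size)` for the copies that follow it. Assuming the usual reading of `restrict_for_offset` (a pointer aligned to `align` and then advanced by `offset` bytes is only guaranteed the largest power of two dividing both), here is a small standalone model of that arithmetic, with invented names:

// Alignments are powers of two; an offset contributes its lowest set bit.
fn restrict_for_offset(align: u64, offset: u64) -> u64 {
    if offset == 0 { align } else { align.min(1 << offset.trailing_zeros()) }
}

fn main() {
    // A 4-aligned array start advanced by one 3-byte element is only 1-aligned,
    // while advancing by one 8-byte element keeps the full 4-byte alignment.
    assert_eq!(restrict_for_offset(4, 3), 1);
    assert_eq!(restrict_for_offset(4, 8), 4);
    println!("ok");
}
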
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 15823a597..3c03172bb 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -1,7 +1,8 @@
use std::borrow::Cow;
+use either::Either;
use rustc_ast::ast::InlineAsmOptions;
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::Instance;
use rustc_middle::{
mir,
@@ -12,12 +13,63 @@ use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMo
use rustc_target::spec::abi::Abi;
use super::{
- FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy, Operand,
- PlaceTy, Scalar, StackPopCleanup,
+ AllocId, FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy,
+ Operand, PlaceTy, Provenance, Scalar, StackPopCleanup,
};
use crate::fluent_generated as fluent;
+/// An argument passed to a function.
+#[derive(Clone, Debug)]
+pub enum FnArg<'tcx, Prov: Provenance = AllocId> {
+ /// Pass a copy of the given operand.
+ Copy(OpTy<'tcx, Prov>),
+ /// Allow for the argument to be passed in-place: destroy the value originally stored at that place and
+ /// make the place inaccessible for the duration of the function call.
+ InPlace(PlaceTy<'tcx, Prov>),
+}
+
+impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
+ pub fn layout(&self) -> &TyAndLayout<'tcx> {
+ match self {
+ FnArg::Copy(op) => &op.layout,
+ FnArg::InPlace(place) => &place.layout,
+ }
+ }
+}
+
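
Not taken from the patch, but as a rough illustration of the `FnArg::Copy` / `FnArg::InPlace` split introduced above: a by-copy argument hands the callee an independent value, while an in-place argument reuses the caller's storage, which is protected for the duration of the call. Here that protection is crudely modelled by taking the value out of a toy local slot; every name below is invented.

enum ToyArg {
    Copy(i32),
    // Index of a caller "local" whose storage is lent to the callee in place.
    InPlace(usize),
}

fn call(callee: impl Fn(i32) -> i32, arg: ToyArg, locals: &mut Vec<Option<i32>>) -> i32 {
    // Degenerate both cases to a plain value the callee can use (like `copy_fn_arg`).
    let value = match &arg {
        ToyArg::Copy(v) => *v,
        ToyArg::InPlace(idx) => locals[*idx].expect("in-place argument must be live"),
    };
    // For in-place passing, make the source inaccessible while the callee runs.
    if let ToyArg::InPlace(idx) = &arg {
        locals[*idx] = None;
    }
    callee(value)
}

fn main() {
    let mut locals = vec![Some(41)];
    let by_copy = call(|x| x + 1, ToyArg::Copy(1), &mut locals);
    let in_place = call(|x| x + 1, ToyArg::InPlace(0), &mut locals);
    println!("{by_copy} {in_place} {:?}", locals[0]); // 2 42 None
}
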
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Make a copy of the given fn_arg. Any `InPlace` argument is degenerated to a copy; no protection
+    /// of the original memory occurs.
+ pub fn copy_fn_arg(
+ &self,
+ arg: &FnArg<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ match arg {
+ FnArg::Copy(op) => Ok(op.clone()),
+ FnArg::InPlace(place) => self.place_to_op(&place),
+ }
+ }
+
+    /// Make a copy of the given fn_args. Any `InPlace` arguments are degenerated to copies; no protection
+    /// of the original memory occurs.
+ pub fn copy_fn_args(
+ &self,
+ args: &[FnArg<'tcx, M::Provenance>],
+ ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::Provenance>>> {
+ args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
+ }
+
+ pub fn fn_arg_field(
+ &self,
+ arg: &FnArg<'tcx, M::Provenance>,
+ field: usize,
+ ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
+ Ok(match arg {
+ FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
+ FnArg::InPlace(place) => FnArg::InPlace(self.project_field(place, field)?),
+ })
+ }
+
pub(super) fn eval_terminator(
&mut self,
terminator: &mir::Terminator<'tcx>,
@@ -68,14 +120,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let old_stack = self.frame_idx();
let old_loc = self.frame().loc;
let func = self.eval_operand(func, None)?;
- let args = self.eval_operands(args)?;
+ let args = self.eval_fn_call_arguments(args)?;
let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
let fn_sig =
self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
let extra_args = &args[fn_sig.inputs().len()..];
let extra_args =
- self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout.ty));
+ self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));
let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
ty::FnPtr(_sig) => {
@@ -83,8 +135,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let fn_val = self.get_ptr_fn(fn_ptr)?;
(fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
}
- ty::FnDef(def_id, substs) => {
- let instance = self.resolve(def_id, substs)?;
+ ty::FnDef(def_id, args) => {
+ let instance = self.resolve(def_id, args)?;
(
FnVal::Instance(instance),
self.fn_abi_of_instance(instance, extra_args)?,
@@ -185,6 +237,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(())
}
+ /// Evaluate the arguments of a function call
+ pub(super) fn eval_fn_call_arguments(
+ &self,
+ ops: &[mir::Operand<'tcx>],
+ ) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> {
+ ops.iter()
+ .map(|op| {
+ Ok(match op {
+ mir::Operand::Move(place) => FnArg::InPlace(self.eval_place(*place)?),
+ _ => FnArg::Copy(self.eval_operand(op, None)?),
+ })
+ })
+ .collect()
+ }
+
fn check_argument_compat(
caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
@@ -275,7 +342,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
fn pass_argument<'x, 'y>(
&mut self,
caller_args: &mut impl Iterator<
- Item = (&'x OpTy<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
+ Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
>,
callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
callee_arg: &PlaceTy<'tcx, M::Provenance>,
@@ -295,35 +362,38 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Now, check
if !Self::check_argument_compat(caller_abi, callee_abi) {
let callee_ty = format!("{}", callee_arg.layout.ty);
- let caller_ty = format!("{}", caller_arg.layout.ty);
+ let caller_ty = format!("{}", caller_arg.layout().ty);
throw_ub_custom!(
fluent::const_eval_incompatible_types,
callee_ty = callee_ty,
caller_ty = caller_ty,
)
}
+ // We work with a copy of the argument for now; if this is in-place argument passing, we
+ // will later protect the source it comes from. This means the callee cannot observe if we
+        // did in-place or by-copy argument passing, except for pointer equality tests.
+ let caller_arg_copy = self.copy_fn_arg(&caller_arg)?;
// Special handling for unsized parameters.
- if caller_arg.layout.is_unsized() {
+ if caller_arg_copy.layout.is_unsized() {
// `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way.
- assert_eq!(caller_arg.layout.ty, callee_arg.layout.ty);
+ assert_eq!(caller_arg_copy.layout.ty, callee_arg.layout.ty);
// We have to properly pre-allocate the memory for the callee.
- // So let's tear down some wrappers.
+ // So let's tear down some abstractions.
// This all has to be in memory, there are no immediate unsized values.
- let src = caller_arg.assert_mem_place();
+ let src = caller_arg_copy.assert_mem_place();
// The destination cannot be one of these "spread args".
- let (dest_frame, dest_local) = callee_arg.assert_local();
+ let (dest_frame, dest_local, dest_offset) = callee_arg
+ .as_mplace_or_local()
+ .right()
+ .expect("callee fn arguments must be locals");
// We are just initializing things, so there can't be anything here yet.
assert!(matches!(
*self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
Operand::Immediate(Immediate::Uninit)
));
+ assert_eq!(dest_offset, None);
// Allocate enough memory to hold `src`.
- let Some((size, align)) = self.size_and_align_of_mplace(&src)? else {
- span_bug!(self.cur_span(), "unsized fn arg with `extern` type tail should not be allowed")
- };
- let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?;
- let dest_place =
- MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), callee_arg.layout, src.meta);
+ let dest_place = self.allocate_dyn(src.layout, MemoryKind::Stack, src.meta)?;
// Update the local to be that new place.
*M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place);
}
@@ -331,7 +401,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
// is true for all `copy_op`, but there are a lot of special cases for argument passing
// specifically.)
- self.copy_op(&caller_arg, callee_arg, /*allow_transmute*/ true)
+ self.copy_op(&caller_arg_copy, callee_arg, /*allow_transmute*/ true)?;
+ // If this was an in-place pass, protect the place it comes from for the duration of the call.
+ if let FnArg::InPlace(place) = caller_arg {
+ M::protect_in_place_function_argument(self, place)?;
+ }
+ Ok(())
}
/// Call this function -- pushing the stack frame and initializing the arguments.
@@ -346,7 +421,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&mut self,
fn_val: FnVal<'tcx, M::ExtraFnVal>,
(caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
- args: &[OpTy<'tcx, M::Provenance>],
+ args: &[FnArg<'tcx, M::Provenance>],
with_caller_location: bool,
destination: &PlaceTy<'tcx, M::Provenance>,
target: Option<mir::BasicBlock>,
@@ -372,8 +447,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match instance.def {
ty::InstanceDef::Intrinsic(def_id) => {
assert!(self.tcx.is_intrinsic(def_id));
- // caller_fn_abi is not relevant here, we interpret the arguments directly for each intrinsic.
- M::call_intrinsic(self, instance, args, destination, target, unwind)
+ // FIXME: Should `InPlace` arguments be reset to uninit?
+ M::call_intrinsic(
+ self,
+ instance,
+ &self.copy_fn_args(args)?,
+ destination,
+ target,
+ unwind,
+ )
}
ty::InstanceDef::VTableShim(..)
| ty::InstanceDef::ReifyShim(..)
@@ -385,10 +467,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
| ty::InstanceDef::ThreadLocalShim(..)
| ty::InstanceDef::Item(_) => {
// We need MIR for this fn
- let Some((body, instance)) =
- M::find_mir_or_eval_fn(self, instance, caller_abi, args, destination, target, unwind)? else {
- return Ok(());
- };
+ let Some((body, instance)) = M::find_mir_or_eval_fn(
+ self,
+ instance,
+ caller_abi,
+ args,
+ destination,
+ target,
+ unwind,
+ )?
+ else {
+ return Ok(());
+ };
// Compute callee information using the `instance` returned by
// `find_mir_or_eval_fn`.
@@ -409,6 +499,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
+ // Check that all target features required by the callee (i.e., from
+ // the attribute `#[target_feature(enable = ...)]`) are enabled at
+ // compile time.
+ self.check_fn_target_features(instance)?;
+
if !callee_fn_abi.can_unwind {
// The callee cannot unwind, so force the `Unreachable` unwind handling.
unwind = mir::UnwindAction::Unreachable;
@@ -428,7 +523,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
"caller ABI: {:?}, args: {:#?}",
caller_abi,
args.iter()
- .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
+ .map(|arg| (
+ arg.layout().ty,
+ match arg {
+ FnArg::Copy(op) => format!("copy({:?})", *op),
+ FnArg::InPlace(place) => format!("in-place({:?})", *place),
+ }
+ ))
.collect::<Vec<_>>()
);
trace!(
@@ -449,7 +550,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// last incoming argument. These two iterators do not have the same type,
// so to keep the code paths uniform we accept an allocation
// (for RustCall ABI only).
- let caller_args: Cow<'_, [OpTy<'tcx, M::Provenance>]> =
+ let caller_args: Cow<'_, [FnArg<'tcx, M::Provenance>]> =
if caller_abi == Abi::RustCall && !args.is_empty() {
// Untuple
let (untuple_arg, args) = args.split_last().unwrap();
@@ -458,11 +559,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
args.iter()
.map(|a| Ok(a.clone()))
.chain(
- (0..untuple_arg.layout.fields.count())
- .map(|i| self.operand_field(untuple_arg, i)),
+ (0..untuple_arg.layout().fields.count())
+ .map(|i| self.fn_arg_field(untuple_arg, i)),
)
- .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::Provenance>>>>(
- )?,
+ .collect::<InterpResult<'_, Vec<_>>>()?,
)
} else {
// Plain arg passing
@@ -491,7 +591,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if Some(local) == body.spread_arg {
// Must be a tuple
for i in 0..dest.layout.fields.count() {
- let dest = self.place_field(&dest, i)?;
+ let dest = self.project_field(&dest, i)?;
let callee_abi = callee_args_abis.next().unwrap();
self.pass_argument(&mut caller_args, callee_abi, &dest)?;
}
@@ -523,6 +623,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
caller_ty = caller_ty,
)
}
+ // Ensure the return place is aligned and dereferenceable, and protect it for
+ // in-place return value passing.
+ if let Either::Left(mplace) = destination.as_mplace_or_local() {
+ self.check_mplace(&mplace)?;
+ } else {
+ // Nothing to do for locals, they are always properly allocated and aligned.
+ }
+ M::protect_in_place_function_argument(self, destination)?;
};
match res {
Err(err) => {
@@ -538,11 +646,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// We have to implement all "object safe receivers". So we have to go search for a
// pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
// unwrap those newtypes until we are there.
- let mut receiver = args[0].clone();
+ // An `InPlace` does nothing here, we keep the original receiver intact. We can't
+ // really pass the argument in-place anyway, and we are constructing a new
+ // `Immediate` receiver.
+ let mut receiver = self.copy_fn_arg(&args[0])?;
let receiver_place = loop {
match receiver.layout.ty.kind() {
ty::Ref(..) | ty::RawPtr(..) => {
- // We do *not* use `deref_operand` here: we don't want to conceptually
+ // We do *not* use `deref_pointer` here: we don't want to conceptually
// create a place that must be dereferenceable, since the receiver might
// be a raw pointer and (for `*const dyn Trait`) we don't need to
// actually access memory to resolve this method.
@@ -562,7 +673,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Not there yet, search for the only non-ZST field.
let mut non_zst_field = None;
for i in 0..receiver.layout.fields.count() {
- let field = self.operand_field(&receiver, i)?;
+ let field = self.project_field(&receiver, i)?;
let zst =
field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
if !zst {
@@ -588,12 +699,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) =
receiver_place.layout.ty.kind()
{
- let (recv, vptr) = self.unpack_dyn_star(&receiver_place.into())?;
+ let (recv, vptr) = self.unpack_dyn_star(&receiver_place)?;
let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
if dyn_trait != data.principal() {
throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
}
- let recv = recv.assert_mem_place(); // we passed an MPlaceTy to `unpack_dyn_star` so we definitely still have one
(vptr, dyn_ty, recv.ptr)
} else {
@@ -603,8 +713,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.tcx
.struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
let ty::Dynamic(data, _, ty::Dyn) = receiver_tail.kind() else {
- span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
- };
+ span_bug!(
+ self.cur_span(),
+ "dynamic call on non-`dyn` type {}",
+ receiver_tail
+ )
+ };
assert!(receiver_place.layout.is_unsized());
// Get the required information from the vtable.
@@ -622,7 +736,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Now determine the actual method to call. We can do that in two different ways and
// compare them to ensure everything fits.
- let Some(ty::VtblEntry::Method(fn_inst)) = self.get_vtable_entries(vptr)?.get(idx).copied() else {
+ let Some(ty::VtblEntry::Method(fn_inst)) =
+ self.get_vtable_entries(vptr)?.get(idx).copied()
+ else {
// FIXME(fee1-dead) these could be variants of the UB info enum instead of this
throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method);
};
@@ -632,7 +748,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let trait_def_id = tcx.trait_of_item(def_id).unwrap();
let virtual_trait_ref =
- ty::TraitRef::from_method(tcx, trait_def_id, instance.substs);
+ ty::TraitRef::from_method(tcx, trait_def_id, instance.args);
let existential_trait_ref =
ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);
@@ -641,18 +757,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
tcx,
self.param_env,
def_id,
- instance.substs.rebase_onto(tcx, trait_def_id, concrete_trait_ref.substs),
+ instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args),
)
.unwrap();
assert_eq!(fn_inst, concrete_method);
}
// Adjust receiver argument. Layout can be any (thin) ptr.
- args[0] = ImmTy::from_immediate(
- Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
- self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, dyn_ty))?,
- )
- .into();
+ args[0] = FnArg::Copy(
+ ImmTy::from_immediate(
+ Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
+ self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, dyn_ty))?,
+ )
+ .into(),
+ );
trace!("Patched receiver operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(
@@ -668,6 +786,31 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
+ fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> {
+ let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+ if attrs
+ .target_features
+ .iter()
+ .any(|feature| !self.tcx.sess.target_features.contains(feature))
+ {
+ throw_ub_custom!(
+ fluent::const_eval_unavailable_target_features_for_fn,
+ unavailable_feats = attrs
+ .target_features
+ .iter()
+ .filter(|&feature| !self.tcx.sess.target_features.contains(feature))
+ .fold(String::new(), |mut s, feature| {
+ if !s.is_empty() {
+ s.push_str(", ");
+ }
+ s.push_str(feature.as_str());
+ s
+ }),
+ );
+ }
+ Ok(())
+ }
+
fn drop_in_place(
&mut self,
place: &PlaceTy<'tcx, M::Provenance>,
@@ -688,7 +831,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
ty::Dynamic(_, _, ty::DynStar) => {
// Dropping a `dyn*`. Need to find actual drop fn.
- self.unpack_dyn_star(&place.into())?.0.assert_mem_place()
+ self.unpack_dyn_star(&place)?.0
}
_ => {
debug_assert_eq!(
@@ -701,16 +844,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
- let arg = ImmTy::from_immediate(
- place.to_ref(self),
- self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, place.layout.ty))?,
- );
+ let arg = self.mplace_to_ref(&place)?;
let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);
self.eval_fn_call(
FnVal::Instance(instance),
(Abi::Rust, fn_abi),
- &[arg.into()],
+ &[FnArg::Copy(arg.into())],
false,
&ret.into(),
Some(target),
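The terminator.rs hunks above replace plain `OpTy` call arguments with a `FnArg` value that records whether an argument is passed by copy or in place, so that in-place arguments (and the return place) can be protected for the duration of the call, and they add a `check_fn_target_features` guard before entering the callee. A minimal, self-contained sketch of the by-copy vs. in-place distinction follows; `Operand`, `Place` and the toy "memory" are illustrative stand-ins, not the rustc `OpTy`/`PlaceTy` types:

// Illustrative sketch only; not the rustc definitions.
#[derive(Clone, Debug)]
struct Operand {
    bytes: Vec<u8>,
}

#[derive(Clone, Debug)]
struct Place {
    index: usize,
}

#[derive(Clone, Debug)]
enum FnArg {
    // The callee only ever sees a copy of the value.
    Copy(Operand),
    // The argument lives in caller-visible storage that is handed over
    // (and protected) while the callee runs.
    InPlace(Place),
}

impl FnArg {
    // Rough analogue of `copy_fn_arg` in the diff above: reading an argument
    // always yields an operand, copying out of the place when necessary.
    fn copy_out(&self, memory: &[Operand]) -> Operand {
        match self {
            FnArg::Copy(op) => op.clone(),
            FnArg::InPlace(place) => memory[place.index].clone(),
        }
    }
}

fn main() {
    let memory = vec![Operand { bytes: vec![1, 2, 3] }];
    let args = vec![
        FnArg::Copy(Operand { bytes: vec![9] }),
        FnArg::InPlace(Place { index: 0 }),
    ];
    for arg in &args {
        println!("{:?}", arg.copy_out(&memory));
    }
}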
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index 22bdd4d2c..b33194423 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -33,12 +33,12 @@ where
match *ty.kind() {
ty::Param(_) => ControlFlow::Break(FoundParam),
- ty::Closure(def_id, substs)
- | ty::Generator(def_id, substs, ..)
- | ty::FnDef(def_id, substs) => {
+ ty::Closure(def_id, args)
+ | ty::Generator(def_id, args, ..)
+ | ty::FnDef(def_id, args) => {
let instance = ty::InstanceDef::Item(def_id);
let unused_params = self.tcx.unused_generic_params(instance);
- for (index, subst) in substs.into_iter().enumerate() {
+ for (index, subst) in args.into_iter().enumerate() {
let index = index
.try_into()
.expect("more generic parameters than can fit into a `u32`");
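Aside from the `substs` → `args` rename, the util.rs hunk shows the `ControlFlow`-based scan that reports the first still-generic parameter. A small stand-alone sketch of that pattern (the `GenericArg` enum is an assumed stand-in, not the rustc type):

use std::ops::ControlFlow;

// Stand-in for a generic argument: either fully concrete or still a parameter.
#[derive(Debug)]
enum GenericArg {
    Concrete(&'static str),
    Param(&'static str),
}

// Walk the argument list and break with the index of the first parameter,
// mirroring the `ControlFlow::Break(FoundParam)` style used above.
fn find_param(args: &[GenericArg]) -> ControlFlow<usize> {
    for (index, arg) in args.iter().enumerate() {
        if let GenericArg::Param(_) = arg {
            return ControlFlow::Break(index);
        }
    }
    ControlFlow::Continue(())
}

fn main() {
    let args = [GenericArg::Concrete("u32"), GenericArg::Param("T")];
    match find_param(&args) {
        ControlFlow::Break(i) => println!("generic parameter at index {i}"),
        ControlFlow::Continue(()) => println!("fully monomorphic"),
    }
}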
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 21c655988..d3f05af1c 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -25,13 +25,17 @@ use rustc_target::abi::{
use std::hash::Hash;
-// for the validation errors
-use super::UndefinedBehaviorInfo::*;
use super::{
AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy,
- Machine, MemPlaceMeta, OpTy, Pointer, Scalar, ValueVisitor,
+ Machine, MemPlaceMeta, OpTy, Pointer, Projectable, Scalar, ValueVisitor,
};
+// for the validation errors
+use super::InterpError::UndefinedBehavior as Ub;
+use super::InterpError::Unsupported as Unsup;
+use super::UndefinedBehaviorInfo::*;
+use super::UnsupportedOpInfo::*;
+
macro_rules! throw_validation_failure {
($where:expr, $kind: expr) => {{
let where_ = &$where;
@@ -43,7 +47,7 @@ macro_rules! throw_validation_failure {
None
};
- throw_ub!(Validation(ValidationErrorInfo { path, kind: $kind }))
+ throw_ub!(ValidationError(ValidationErrorInfo { path, kind: $kind }))
}};
}
@@ -85,16 +89,16 @@ macro_rules! try_validation {
Ok(x) => x,
// We catch the error and turn it into a validation failure. We are okay with
// allocation here as this can only slow down builds that fail anyway.
- Err(e) => match e.into_parts() {
+ Err(e) => match e.kind() {
$(
- (InterpError::UndefinedBehavior($($p)|+), _) =>
+ $($p)|+ =>
throw_validation_failure!(
$where,
$kind
)
),+,
#[allow(unreachable_patterns)]
- (e, rest) => Err::<!, _>($crate::interpret::InterpErrorInfo::from_parts(e, rest))?,
+ _ => Err::<!, _>(e)?,
}
}
}};
@@ -136,19 +140,19 @@ pub struct RefTracking<T, PATH = ()> {
pub todo: Vec<(T, PATH)>,
}
-impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
+impl<T: Clone + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
pub fn empty() -> Self {
RefTracking { seen: FxHashSet::default(), todo: vec![] }
}
pub fn new(op: T) -> Self {
let mut ref_tracking_for_consts =
- RefTracking { seen: FxHashSet::default(), todo: vec![(op, PATH::default())] };
+ RefTracking { seen: FxHashSet::default(), todo: vec![(op.clone(), PATH::default())] };
ref_tracking_for_consts.seen.insert(op);
ref_tracking_for_consts
}
pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
- if self.seen.insert(op) {
+ if self.seen.insert(op.clone()) {
trace!("Recursing below ptr {:#?}", op);
let path = path();
// Remember to come back to this later.
@@ -164,14 +168,14 @@ fn write_path(out: &mut String, path: &[PathElem]) {
for elem in path.iter() {
match elem {
- Field(name) => write!(out, ".{}", name),
+ Field(name) => write!(out, ".{name}"),
EnumTag => write!(out, ".<enum-tag>"),
- Variant(name) => write!(out, ".<enum-variant({})>", name),
+ Variant(name) => write!(out, ".<enum-variant({name})>"),
GeneratorTag => write!(out, ".<generator-tag>"),
GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
- CapturedVar(name) => write!(out, ".<captured-var({})>", name),
- TupleElem(idx) => write!(out, ".{}", idx),
- ArrayElem(idx) => write!(out, "[{}]", idx),
+ CapturedVar(name) => write!(out, ".<captured-var({name})>"),
+ TupleElem(idx) => write!(out, ".{idx}"),
+ ArrayElem(idx) => write!(out, "[{idx}]"),
// `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
// some of the other items here also are not Rust syntax. Actually we can't
// even use the usual syntax because we are just showing the projections,
@@ -294,7 +298,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
Ok(try_validation!(
self.ecx.read_immediate(op),
self.path,
- InvalidUninitBytes(None) => Uninit { expected }
+ Ub(InvalidUninitBytes(None)) =>
+ Uninit { expected },
+ // The `Unsup` cases can only occur during CTFE
+ Unsup(ReadPointerAsInt(_)) =>
+ PointerAsInt { expected },
+ Unsup(ReadPartialPointer(_)) =>
+ PartialPointer,
))
}
@@ -319,8 +329,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let (_ty, _trait) = try_validation!(
self.ecx.get_ptr_vtable(vtable),
self.path,
- DanglingIntPointer(..) |
- InvalidVTablePointer(..) => InvalidVTablePtr { value: format!("{vtable}") }
+ Ub(DanglingIntPointer(..) | InvalidVTablePointer(..)) =>
+ InvalidVTablePtr { value: format!("{vtable}") }
);
// FIXME: check if the type/trait match what ty::Dynamic says?
}
@@ -345,6 +355,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
value: &OpTy<'tcx, M::Provenance>,
ptr_kind: PointerKind,
) -> InterpResult<'tcx> {
+ // Not using `deref_pointer` since we do the dereferenceable check ourselves below.
let place = self.ecx.ref_to_mplace(&self.read_immediate(value, ptr_kind.into())?)?;
// Handle wide pointers.
// Check metadata early, for better diagnostics
@@ -355,7 +366,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let size_and_align = try_validation!(
self.ecx.size_and_align_of_mplace(&place),
self.path,
- InvalidMeta(msg) => match msg {
+ Ub(InvalidMeta(msg)) => match msg {
InvalidMetaKind::SliceTooBig => InvalidMetaSliceTooLarge { ptr_kind },
InvalidMetaKind::TooBig => InvalidMetaTooLarge { ptr_kind },
}
@@ -374,23 +385,23 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
),
self.path,
- AlignmentCheckFailed { required, has } => UnalignedPtr {
+ Ub(AlignmentCheckFailed { required, has }) => UnalignedPtr {
ptr_kind,
required_bytes: required.bytes(),
found_bytes: has.bytes()
},
- DanglingIntPointer(0, _) => NullPtr { ptr_kind },
- DanglingIntPointer(i, _) => DanglingPtrNoProvenance {
+ Ub(DanglingIntPointer(0, _)) => NullPtr { ptr_kind },
+ Ub(DanglingIntPointer(i, _)) => DanglingPtrNoProvenance {
ptr_kind,
// FIXME this says "null pointer" when null but we need translate
- pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(i))
+ pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(*i))
},
- PointerOutOfBounds { .. } => DanglingPtrOutOfBounds {
+ Ub(PointerOutOfBounds { .. }) => DanglingPtrOutOfBounds {
ptr_kind
},
// This cannot happen during const-eval (because interning already detects
// dangling pointers), but it can happen in Miri.
- PointerUseAfterFree(..) => DanglingPtrUseAfterFree {
+ Ub(PointerUseAfterFree(..)) => DanglingPtrUseAfterFree {
ptr_kind,
},
);
@@ -462,6 +473,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
/// Check if this is a value of primitive type, and if yes check the validity of the value
/// at that type. Return `true` if the type is indeed primitive.
+ ///
+ /// Note that not all of these have `FieldsShape::Primitive`, e.g. wide references.
fn try_visit_primitive(
&mut self,
value: &OpTy<'tcx, M::Provenance>,
@@ -474,7 +487,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
try_validation!(
value.to_bool(),
self.path,
- InvalidBool(..) => ValidationErrorKind::InvalidBool {
+ Ub(InvalidBool(..)) => ValidationErrorKind::InvalidBool {
value: format!("{value:x}"),
}
);
@@ -485,7 +498,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
try_validation!(
value.to_char(),
self.path,
- InvalidChar(..) => ValidationErrorKind::InvalidChar {
+ Ub(InvalidChar(..)) => ValidationErrorKind::InvalidChar {
value: format!("{value:x}"),
}
);
@@ -494,7 +507,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
// NOTE: Keep this in sync with the array optimization for int/float
// types below!
- let value = self.read_scalar(
+ self.read_scalar(
value,
if matches!(ty.kind(), ty::Float(..)) {
ExpectedKind::Float
@@ -502,20 +515,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
ExpectedKind::Int
},
)?;
- // As a special exception we *do* match on a `Scalar` here, since we truly want
- // to know its underlying representation (and *not* cast it to an integer).
- if matches!(value, Scalar::Ptr(..)) {
- throw_validation_failure!(
- self.path,
- ExpectedNonPtr { value: format!("{value:x}") }
- )
- }
Ok(true)
}
ty::RawPtr(..) => {
- // We are conservative with uninit for integers, but try to
- // actually enforce the strict rules for raw pointers (mostly because
- // that lets us re-use `ref_to_mplace`).
let place =
self.ecx.ref_to_mplace(&self.read_immediate(value, ExpectedKind::RawPtr)?)?;
if place.layout.is_unsized() {
@@ -546,10 +548,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let _fn = try_validation!(
self.ecx.get_ptr_fn(ptr),
self.path,
- DanglingIntPointer(..) |
- InvalidFunctionPointer(..) => InvalidFnPtr {
- value: format!("{ptr}"),
- },
+ Ub(DanglingIntPointer(..) | InvalidFunctionPointer(..)) =>
+ InvalidFnPtr { value: format!("{ptr}") },
);
// FIXME: Check if the signature matches
} else {
@@ -657,13 +657,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
Ok(try_validation!(
this.ecx.read_discriminant(op),
this.path,
- InvalidTag(val) => InvalidEnumTag {
+ Ub(InvalidTag(val)) => InvalidEnumTag {
value: format!("{val:x}"),
},
-
- InvalidUninitBytes(None) => UninitEnumTag,
- )
- .1)
+ Ub(UninhabitedEnumVariantRead(_)) => UninhabitedEnumVariant,
+ // Uninit / bad provenance are not possible since the field was already previously
+ // checked at its integer type.
+ ))
})
}
@@ -733,60 +733,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
}
}
- // Recursively walk the value at its type.
- self.walk_value(op)?;
-
- // *After* all of this, check the ABI. We need to check the ABI to handle
- // types like `NonNull` where the `Scalar` info is more restrictive than what
- // the fields say (`rustc_layout_scalar_valid_range_start`).
- // But in most cases, this will just propagate what the fields say,
- // and then we want the error to point at the field -- so, first recurse,
- // then check ABI.
- //
- // FIXME: We could avoid some redundant checks here. For newtypes wrapping
- // scalars, we do the same check on every "level" (e.g., first we check
- // MyNewtype and then the scalar in there).
- match op.layout.abi {
- Abi::Uninhabited => {
- let ty = op.layout.ty;
- throw_validation_failure!(self.path, UninhabitedVal { ty });
- }
- Abi::Scalar(scalar_layout) => {
- if !scalar_layout.is_uninit_valid() {
- // There is something to check here.
- let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
- self.visit_scalar(scalar, scalar_layout)?;
- }
- }
- Abi::ScalarPair(a_layout, b_layout) => {
- // We can only proceed if *both* scalars need to be initialized.
- // FIXME: find a way to also check ScalarPair when one side can be uninit but
- // the other must be init.
- if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
- let (a, b) =
- self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
- self.visit_scalar(a, a_layout)?;
- self.visit_scalar(b, b_layout)?;
- }
- }
- Abi::Vector { .. } => {
- // No checks here, we assume layout computation gets this right.
- // (This is harder to check since Miri does not represent these as `Immediate`. We
- // also cannot use field projections since this might be a newtype around a vector.)
- }
- Abi::Aggregate { .. } => {
- // Nothing to do.
- }
- }
-
- Ok(())
- }
-
- fn visit_aggregate(
- &mut self,
- op: &OpTy<'tcx, M::Provenance>,
- fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
- ) -> InterpResult<'tcx> {
+ // Recursively walk the value at its type. Apply optimizations for some large types.
match op.layout.ty.kind() {
ty::Str => {
let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
@@ -794,7 +741,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
try_validation!(
self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len)),
self.path,
- InvalidUninitBytes(..) => { UninitStr },
+ Ub(InvalidUninitBytes(..)) => Uninit { expected: ExpectedKind::Str },
+ Unsup(ReadPointerAsInt(_)) => PointerAsInt { expected: ExpectedKind::Str }
);
}
ty::Array(tys, ..) | ty::Slice(tys)
@@ -806,6 +754,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..))
=>
{
+ let expected = if tys.is_integral() { ExpectedKind::Int } else { ExpectedKind::Float };
// Optimized handling for arrays of integer/float type.
// This is the length of the array/slice.
@@ -824,7 +773,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
Left(mplace) => mplace,
Right(imm) => match *imm {
Immediate::Uninit =>
- throw_validation_failure!(self.path, UninitVal),
+ throw_validation_failure!(self.path, Uninit { expected }),
Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
bug!("arrays/slices can never have Scalar/ScalarPair layout"),
}
@@ -850,17 +799,21 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// For some errors we might be able to provide extra information.
// (This custom logic does not fit the `try_validation!` macro.)
match err.kind() {
- err_ub!(InvalidUninitBytes(Some((_alloc_id, access)))) => {
+ Ub(InvalidUninitBytes(Some((_alloc_id, access)))) | Unsup(ReadPointerAsInt(Some((_alloc_id, access)))) => {
// Some byte was uninitialized, determine which
// element that byte belongs to so we can
// provide an index.
let i = usize::try_from(
- access.uninit.start.bytes() / layout.size.bytes(),
+ access.bad.start.bytes() / layout.size.bytes(),
)
.unwrap();
self.path.push(PathElem::ArrayElem(i));
- throw_validation_failure!(self.path, UninitVal)
+ if matches!(err.kind(), Ub(InvalidUninitBytes(_))) {
+ throw_validation_failure!(self.path, Uninit { expected })
+ } else {
+ throw_validation_failure!(self.path, PointerAsInt { expected })
+ }
}
// Propagate upwards (that will also check for unexpected errors).
@@ -874,12 +827,58 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// ZST type, so either validation fails for all elements or none.
ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => {
// Validate just the first element (if any).
- self.walk_aggregate(op, fields.take(1))?
+ if op.len(self.ecx)? > 0 {
+ self.visit_field(op, 0, &self.ecx.project_index(op, 0)?)?;
+ }
}
_ => {
- self.walk_aggregate(op, fields)? // default handler
+ self.walk_value(op)?; // default handler
+ }
+ }
+
+ // *After* all of this, check the ABI. We need to check the ABI to handle
+ // types like `NonNull` where the `Scalar` info is more restrictive than what
+ // the fields say (`rustc_layout_scalar_valid_range_start`).
+ // But in most cases, this will just propagate what the fields say,
+ // and then we want the error to point at the field -- so, first recurse,
+ // then check ABI.
+ //
+ // FIXME: We could avoid some redundant checks here. For newtypes wrapping
+ // scalars, we do the same check on every "level" (e.g., first we check
+ // MyNewtype and then the scalar in there).
+ match op.layout.abi {
+ Abi::Uninhabited => {
+ let ty = op.layout.ty;
+ throw_validation_failure!(self.path, UninhabitedVal { ty });
+ }
+ Abi::Scalar(scalar_layout) => {
+ if !scalar_layout.is_uninit_valid() {
+ // There is something to check here.
+ let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
+ self.visit_scalar(scalar, scalar_layout)?;
+ }
+ }
+ Abi::ScalarPair(a_layout, b_layout) => {
+ // We can only proceed if *both* scalars need to be initialized.
+ // FIXME: find a way to also check ScalarPair when one side can be uninit but
+ // the other must be init.
+ if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
+ let (a, b) =
+ self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
+ self.visit_scalar(a, a_layout)?;
+ self.visit_scalar(b, b_layout)?;
+ }
+ }
+ Abi::Vector { .. } => {
+ // No checks here, we assume layout computation gets this right.
+ // (This is harder to check since Miri does not represent these as `Immediate`. We
+ // also cannot use field projections since this might be a newtype around a vector.)
+ }
+ Abi::Aggregate { .. } => {
+ // Nothing to do.
}
}
+
Ok(())
}
}
@@ -900,17 +899,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Run it.
match visitor.visit_value(&op) {
Ok(()) => Ok(()),
- // Pass through validation failures.
- Err(err) if matches!(err.kind(), err_ub!(Validation { .. })) => Err(err),
- // Complain about any other kind of UB error -- those are bad because we'd like to
+ // Pass through validation failures and "invalid program" issues.
+ Err(err)
+ if matches!(
+ err.kind(),
+ err_ub!(ValidationError { .. }) | InterpError::InvalidProgram(_)
+ ) =>
+ {
+ Err(err)
+ }
+ // Complain about any other kind of error -- those are bad because we'd like to
// report them in a way that shows *where* in the value the issue lies.
- Err(err) if matches!(err.kind(), InterpError::UndefinedBehavior(_)) => {
+ Err(err) => {
let (err, backtrace) = err.into_parts();
backtrace.print_backtrace();
bug!("Unexpected Undefined Behavior error during validation: {err:?}");
}
- // Pass through everything else.
- Err(err) => Err(err),
}
}
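The validity.rs changes above stop destructuring errors via `into_parts` and instead match directly on `err.kind()` through short variant aliases (`Ub` for undefined behavior, `Unsup` for unsupported operations), which is what lets CTFE-only cases such as `ReadPointerAsInt` get their own validation errors. A toy sketch of that aliased-variant matching style; the enums are simplified stand-ins for the rustc_middle error types:

// Simplified stand-ins; the real enums live in rustc_middle.
#[derive(Debug)]
enum UndefinedBehaviorInfo {
    InvalidUninitBytes,
    InvalidBool,
}

#[derive(Debug)]
enum UnsupportedOpInfo {
    ReadPointerAsInt,
}

#[derive(Debug)]
enum InterpError {
    UndefinedBehavior(UndefinedBehaviorInfo),
    Unsupported(UnsupportedOpInfo),
    ResourceExhaustion,
}

// Short aliases, mirroring `use super::InterpError::UndefinedBehavior as Ub;`.
use InterpError::UndefinedBehavior as Ub;
use InterpError::Unsupported as Unsup;

fn classify(err: &InterpError) -> &'static str {
    match err {
        Ub(UndefinedBehaviorInfo::InvalidUninitBytes) => "uninit value",
        Ub(UndefinedBehaviorInfo::InvalidBool) => "invalid bool",
        Unsup(UnsupportedOpInfo::ReadPointerAsInt) => "pointer read as int",
        // Anything else is propagated unchanged by the real macro.
        _ => "propagate",
    }
}

fn main() {
    println!("{}", classify(&Ub(UndefinedBehaviorInfo::InvalidUninitBytes)));
    println!("{}", classify(&Unsup(UnsupportedOpInfo::ReadPointerAsInt)));
    println!("{}", classify(&InterpError::ResourceExhaustion));
}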
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index 7a1445939..531e2bd3e 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -1,544 +1,202 @@
//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
//! types until we arrive at the leaves, with custom handling for primitive types.
+use rustc_index::IndexVec;
use rustc_middle::mir::interpret::InterpResult;
use rustc_middle::ty;
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::FieldIdx;
use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
use std::num::NonZeroUsize;
-use super::{InterpCx, MPlaceTy, Machine, OpTy, PlaceTy};
+use super::{InterpCx, MPlaceTy, Machine, Projectable};
-/// A thing that we can project into, and that has a layout.
-/// This wouldn't have to depend on `Machine` but with the current type inference,
-/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
-pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
- /// Gets this value's layout.
- fn layout(&self) -> TyAndLayout<'tcx>;
+/// How to traverse a value and what to do when we are at the leaves.
+pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+ type V: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>>;
- /// Makes this into an `OpTy`, in a cheap way that is good for reading.
- fn to_op_for_read(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
- /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
- fn to_op_for_proj(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- self.to_op_for_read(ecx)
- }
-
- /// Creates this from an `OpTy`.
- ///
- /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
-
- /// Projects to the given enum variant.
- fn project_downcast(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self>;
-
- /// Projects to the n-th field.
- fn project_field(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self>;
-}
-
-/// A thing that we can project into given *mutable* access to `ecx`, and that has a layout.
-/// This wouldn't have to depend on `Machine` but with the current type inference,
-/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
-pub trait ValueMut<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
- /// Gets this value's layout.
- fn layout(&self) -> TyAndLayout<'tcx>;
-
- /// Makes this into an `OpTy`, in a cheap way that is good for reading.
- fn to_op_for_read(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
- /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
- fn to_op_for_proj(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
- /// Creates this from an `OpTy`.
- ///
- /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
-
- /// Projects to the given enum variant.
- fn project_downcast(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self>;
-
- /// Projects to the n-th field.
- fn project_field(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self>;
-}
-
-// We cannot have a general impl which shows that Value implies ValueMut. (When we do, it says we
-// cannot `impl ValueMut for PlaceTy` because some downstream crate could `impl Value for PlaceTy`.)
-// So we have some copy-paste here. (We could have a macro but since we only have 2 types with this
-// double-impl, that would barely make the code shorter, if at all.)
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::Provenance> {
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.clone())
- }
-
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- op.clone()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.operand_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.operand_field(self, field)
- }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
- for OpTy<'tcx, M::Provenance>
-{
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.clone())
- }
-
- #[inline(always)]
- fn to_op_for_proj(
- &self,
- _ecx: &mut InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.clone())
- }
-
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- op.clone()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.operand_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.operand_field(self, field)
- }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
- for MPlaceTy<'tcx, M::Provenance>
-{
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.into())
- }
-
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- // assert is justified because our `to_op_for_read` only ever produces `Indirect` operands.
- op.assert_mem_place()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.mplace_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.mplace_field(self, field)
- }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
- for MPlaceTy<'tcx, M::Provenance>
-{
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.into())
- }
-
- #[inline(always)]
- fn to_op_for_proj(
- &self,
- _ecx: &mut InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.into())
- }
-
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- // assert is justified because our `to_op_for_proj` only ever produces `Indirect` operands.
- op.assert_mem_place()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.mplace_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.mplace_field(self, field)
- }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
- for PlaceTy<'tcx, M::Provenance>
-{
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- // No need for `force_allocation` since we are just going to read from this.
- ecx.place_to_op(self)
- }
+ /// The visitor must have an `InterpCx` in it.
+ fn ecx(&self) -> &InterpCx<'mir, 'tcx, M>;
+ /// `read_discriminant` can be hooked for better error messages.
#[inline(always)]
- fn to_op_for_proj(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- // We `force_allocation` here so that `from_op` below can work.
- Ok(ecx.force_allocation(self)?.into())
+ fn read_discriminant(&mut self, v: &Self::V) -> InterpResult<'tcx, VariantIdx> {
+ Ok(self.ecx().read_discriminant(&v.to_op(self.ecx())?)?)
}
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- // assert is justified because our `to_op` only ever produces `Indirect` operands.
- op.assert_mem_place().into()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.place_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.place_field(self, field)
- }
-}
-
-macro_rules! make_value_visitor {
- ($visitor_trait:ident, $value_trait:ident, $($mutability:ident)?) => {
- /// How to traverse a value and what to do when we are at the leaves.
- pub trait $visitor_trait<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
- type V: $value_trait<'mir, 'tcx, M>;
-
- /// The visitor must have an `InterpCx` in it.
- fn ecx(&$($mutability)? self)
- -> &$($mutability)? InterpCx<'mir, 'tcx, M>;
-
- /// `read_discriminant` can be hooked for better error messages.
- #[inline(always)]
- fn read_discriminant(
- &mut self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, VariantIdx> {
- Ok(self.ecx().read_discriminant(op)?.1)
- }
-
- // Recursive actions, ready to be overloaded.
- /// Visits the given value, dispatching as appropriate to more specialized visitors.
- #[inline(always)]
- fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
- {
- self.walk_value(v)
- }
- /// Visits the given value as a union. No automatic recursion can happen here.
- #[inline(always)]
- fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
- {
- Ok(())
- }
- /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
- /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
- /// pointee type is the actual `T`.
- #[inline(always)]
- fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx>
- {
- Ok(())
+ /// This function provides the chance to reorder the order in which fields are visited for
+ /// `FieldsShape::Aggregate`: The order of fields will be
+ /// `(0..num_fields).map(aggregate_field_order)`.
+ ///
+ /// The default means we iterate in source declaration order; alternatively, this can do an inverse
+ /// lookup in `memory_index` to use memory field order instead.
+ #[inline(always)]
+ fn aggregate_field_order(_memory_index: &IndexVec<FieldIdx, u32>, idx: usize) -> usize {
+ idx
+ }
+
+ // Recursive actions, ready to be overloaded.
+ /// Visits the given value, dispatching as appropriate to more specialized visitors.
+ #[inline(always)]
+ fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+ self.walk_value(v)
+ }
+ /// Visits the given value as a union. No automatic recursion can happen here.
+ #[inline(always)]
+ fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx> {
+ Ok(())
+ }
+ /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
+ /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
+ /// pointee type is the actual `T`.
+ #[inline(always)]
+ fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Called each time we recurse down to a field of a "product-like" aggregate
+ /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
+ /// and new (inner) value.
+ /// This gives the visitor the chance to track the stack of nested fields that
+ /// we are descending through.
+ #[inline(always)]
+ fn visit_field(
+ &mut self,
+ _old_val: &Self::V,
+ _field: usize,
+ new_val: &Self::V,
+ ) -> InterpResult<'tcx> {
+ self.visit_value(new_val)
+ }
+ /// Called when recursing into an enum variant.
+ /// This gives the visitor the chance to track the stack of nested fields that
+ /// we are descending through.
+ #[inline(always)]
+ fn visit_variant(
+ &mut self,
+ _old_val: &Self::V,
+ _variant: VariantIdx,
+ new_val: &Self::V,
+ ) -> InterpResult<'tcx> {
+ self.visit_value(new_val)
+ }
+
+ fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+ let ty = v.layout().ty;
+ trace!("walk_value: type: {ty}");
+
+ // Special treatment for special types, where the (static) layout is not sufficient.
+ match *ty.kind() {
+ // If it is a trait object, switch to the real type that was used to create it.
+ ty::Dynamic(_, _, ty::Dyn) => {
+ // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
+ // vtable stored in the place metadata.
+ // unsized values are never immediate, so we can assert_mem_place
+ let op = v.to_op(self.ecx())?;
+ let dest = op.assert_mem_place();
+ let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
+ trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
+ // recurse with the inner type
+ return self.visit_field(&v, 0, &inner_mplace.into());
}
- /// Visits this value as an aggregate, you are getting an iterator yielding
- /// all the fields (still in an `InterpResult`, you have to do error handling yourself).
- /// Recurses into the fields.
- #[inline(always)]
- fn visit_aggregate(
- &mut self,
- v: &Self::V,
- fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
- ) -> InterpResult<'tcx> {
- self.walk_aggregate(v, fields)
+ ty::Dynamic(_, _, ty::DynStar) => {
+ // DynStar types. Very different from a dyn type (but strangely part of the
+ // same variant in `TyKind`): These are pairs where the 2nd component is the
+ // vtable, and the first component is the data (which must be ptr-sized).
+ let data = self.ecx().unpack_dyn_star(v)?.0;
+ return self.visit_field(&v, 0, &data);
}
-
- /// Called each time we recurse down to a field of a "product-like" aggregate
- /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
- /// and new (inner) value.
- /// This gives the visitor the chance to track the stack of nested fields that
- /// we are descending through.
- #[inline(always)]
- fn visit_field(
- &mut self,
- _old_val: &Self::V,
- _field: usize,
- new_val: &Self::V,
- ) -> InterpResult<'tcx> {
- self.visit_value(new_val)
+ // Slices do not need special handling here: they have `Array` field
+ // placement with length 0, so we enter the `Array` case below which
+ // indirectly uses the metadata to determine the actual length.
+
+ // However, `Box`... let's talk about `Box`.
+ ty::Adt(def, ..) if def.is_box() => {
+ // `Box` is a hybrid primitive-library-defined type that on the one hand is
+ // a dereferenceable pointer, on the other hand has *basically arbitrary
+ // user-defined layout* since the user controls the 'allocator' field. So it
+ // cannot be treated like a normal pointer, since it does not fit into an
+ // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
+ // something with "all boxed pointers", so we handle this mess for them.
+ //
+ // When we hit a `Box`, we do not do the usual field recursion; instead,
+ // we (a) call `visit_box` on the pointer value, and (b) recurse on the
+ // allocator field. We also assert tons of things to ensure we do not miss
+ // any other fields.
+
+ // `Box` has two fields: the pointer we care about, and the allocator.
+ assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
+ let (unique_ptr, alloc) =
+ (self.ecx().project_field(v, 0)?, self.ecx().project_field(v, 1)?);
+ // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
+ // (which means another 2 fields, the second of which is a `PhantomData`)
+ assert_eq!(unique_ptr.layout().fields.count(), 2);
+ let (nonnull_ptr, phantom) = (
+ self.ecx().project_field(&unique_ptr, 0)?,
+ self.ecx().project_field(&unique_ptr, 1)?,
+ );
+ assert!(
+ phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
+ "2nd field of `Unique` should be PhantomData but is {:?}",
+ phantom.layout().ty,
+ );
+ // ... that contains a `NonNull`... (gladly, only a single field here)
+ assert_eq!(nonnull_ptr.layout().fields.count(), 1);
+ let raw_ptr = self.ecx().project_field(&nonnull_ptr, 0)?; // the actual raw ptr
+ // ... whose only field finally is a raw ptr we can dereference.
+ self.visit_box(&raw_ptr)?;
+
+ // The second `Box` field is the allocator, which we recursively check for validity
+ // like in regular structs.
+ self.visit_field(v, 1, &alloc)?;
+
+ // We visited all parts of this one.
+ return Ok(());
}
- /// Called when recursing into an enum variant.
- /// This gives the visitor the chance to track the stack of nested fields that
- /// we are descending through.
- #[inline(always)]
- fn visit_variant(
- &mut self,
- _old_val: &Self::V,
- _variant: VariantIdx,
- new_val: &Self::V,
- ) -> InterpResult<'tcx> {
- self.visit_value(new_val)
+ _ => {}
+ };
+
+ // Visit the fields of this value.
+ match &v.layout().fields {
+ FieldsShape::Primitive => {}
+ &FieldsShape::Union(fields) => {
+ self.visit_union(v, fields)?;
}
-
- // Default recursors. Not meant to be overloaded.
- fn walk_aggregate(
- &mut self,
- v: &Self::V,
- fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
- ) -> InterpResult<'tcx> {
- // Now iterate over it.
- for (idx, field_val) in fields.enumerate() {
- self.visit_field(v, idx, &field_val?)?;
+ FieldsShape::Arbitrary { offsets, memory_index } => {
+ for idx in 0..offsets.len() {
+ let idx = Self::aggregate_field_order(memory_index, idx);
+ let field = self.ecx().project_field(v, idx)?;
+ self.visit_field(v, idx, &field)?;
}
- Ok(())
}
- fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
- {
- let ty = v.layout().ty;
- trace!("walk_value: type: {ty}");
-
- // Special treatment for special types, where the (static) layout is not sufficient.
- match *ty.kind() {
- // If it is a trait object, switch to the real type that was used to create it.
- ty::Dynamic(_, _, ty::Dyn) => {
- // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
- // vtable stored in the place metadata.
- // unsized values are never immediate, so we can assert_mem_place
- let op = v.to_op_for_read(self.ecx())?;
- let dest = op.assert_mem_place();
- let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
- trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
- // recurse with the inner type
- return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into()));
- },
- ty::Dynamic(_, _, ty::DynStar) => {
- // DynStar types. Very different from a dyn type (but strangely part of the
- // same variant in `TyKind`): These are pairs where the 2nd component is the
- // vtable, and the first component is the data (which must be ptr-sized).
- let op = v.to_op_for_proj(self.ecx())?;
- let data = self.ecx().unpack_dyn_star(&op)?.0;
- return self.visit_field(&v, 0, &$value_trait::from_op(&data));
- }
- // Slices do not need special handling here: they have `Array` field
- // placement with length 0, so we enter the `Array` case below which
- // indirectly uses the metadata to determine the actual length.
-
- // However, `Box`... let's talk about `Box`.
- ty::Adt(def, ..) if def.is_box() => {
- // `Box` is a hybrid primitive-library-defined type that one the one hand is
- // a dereferenceable pointer, on the other hand has *basically arbitrary
- // user-defined layout* since the user controls the 'allocator' field. So it
- // cannot be treated like a normal pointer, since it does not fit into an
- // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
- // something with "all boxed pointers", so we handle this mess for them.
- //
- // When we hit a `Box`, we do not do the usual `visit_aggregate`; instead,
- // we (a) call `visit_box` on the pointer value, and (b) recurse on the
- // allocator field. We also assert tons of things to ensure we do not miss
- // any other fields.
-
- // `Box` has two fields: the pointer we care about, and the allocator.
- assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
- let (unique_ptr, alloc) =
- (v.project_field(self.ecx(), 0)?, v.project_field(self.ecx(), 1)?);
- // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
- // (which means another 2 fields, the second of which is a `PhantomData`)
- assert_eq!(unique_ptr.layout().fields.count(), 2);
- let (nonnull_ptr, phantom) = (
- unique_ptr.project_field(self.ecx(), 0)?,
- unique_ptr.project_field(self.ecx(), 1)?,
- );
- assert!(
- phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
- "2nd field of `Unique` should be PhantomData but is {:?}",
- phantom.layout().ty,
- );
- // ... that contains a `NonNull`... (gladly, only a single field here)
- assert_eq!(nonnull_ptr.layout().fields.count(), 1);
- let raw_ptr = nonnull_ptr.project_field(self.ecx(), 0)?; // the actual raw ptr
- // ... whose only field finally is a raw ptr we can dereference.
- self.visit_box(&raw_ptr)?;
-
- // The second `Box` field is the allocator, which we recursively check for validity
- // like in regular structs.
- self.visit_field(v, 1, &alloc)?;
-
- // We visited all parts of this one.
- return Ok(());
- }
- _ => {},
- };
-
- // Visit the fields of this value.
- match &v.layout().fields {
- FieldsShape::Primitive => {}
- &FieldsShape::Union(fields) => {
- self.visit_union(v, fields)?;
- }
- FieldsShape::Arbitrary { offsets, .. } => {
- // FIXME: We collect in a vec because otherwise there are lifetime
- // errors: Projecting to a field needs access to `ecx`.
- let fields: Vec<InterpResult<'tcx, Self::V>> =
- (0..offsets.len()).map(|i| {
- v.project_field(self.ecx(), i)
- })
- .collect();
- self.visit_aggregate(v, fields.into_iter())?;
- }
- FieldsShape::Array { .. } => {
- // Let's get an mplace (or immediate) first.
- // This might `force_allocate` if `v` is a `PlaceTy`, but `place_index` does that anyway.
- let op = v.to_op_for_proj(self.ecx())?;
- // Now we can go over all the fields.
- // This uses the *run-time length*, i.e., if we are a slice,
- // the dynamic info from the metadata is used.
- let iter = self.ecx().operand_array_fields(&op)?
- .map(|f| f.and_then(|f| {
- Ok($value_trait::from_op(&f))
- }));
- self.visit_aggregate(v, iter)?;
- }
+ FieldsShape::Array { .. } => {
+ for (idx, field) in self.ecx().project_array_fields(v)?.enumerate() {
+ self.visit_field(v, idx, &field?)?;
}
+ }
+ }
- match v.layout().variants {
- // If this is a multi-variant layout, find the right variant and proceed
- // with *its* fields.
- Variants::Multiple { .. } => {
- let op = v.to_op_for_read(self.ecx())?;
- let idx = self.read_discriminant(&op)?;
- let inner = v.project_downcast(self.ecx(), idx)?;
- trace!("walk_value: variant layout: {:#?}", inner.layout());
- // recurse with the inner type
- self.visit_variant(v, idx, &inner)
- }
- // For single-variant layouts, we already did anything there is to do.
- Variants::Single { .. } => Ok(())
- }
+ match v.layout().variants {
+ // If this is a multi-variant layout, find the right variant and proceed
+ // with *its* fields.
+ Variants::Multiple { .. } => {
+ let idx = self.read_discriminant(v)?;
+ // There are 3 cases where downcasts can turn a Scalar/ScalarPair into a different ABI which
+ // could be a problem for `ImmTy` (see layout_sanity_check):
+ // - variant.size == Size::ZERO: works fine because `ImmTy::offset` has a special case for
+ // zero-sized layouts.
+ // - variant.fields.count() == 0: works fine because `ImmTy::offset` has a special case for
+ // zero-field aggregates.
+ // - variant.abi.is_uninhabited(): triggers UB in `read_discriminant` so we never get here.
+ let inner = self.ecx().project_downcast(v, idx)?;
+ trace!("walk_value: variant layout: {:#?}", inner.layout());
+ // recurse with the inner type
+ self.visit_variant(v, idx, &inner)?;
}
+ // For single-variant layouts, we already did anything there is to do.
+ Variants::Single { .. } => {}
}
+
+ Ok(())
}
}
-
-make_value_visitor!(ValueVisitor, Value,);
-make_value_visitor!(MutValueVisitor, ValueMut, mut);
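The visitor.rs rewrite above collapses the two macro-generated `Value`/`ValueMut` traits into a single `ValueVisitor` over `Projectable` values: a default `walk_value` drives the recursion, and implementors override hooks such as `visit_value` and `visit_field`. A toy version of that hook-plus-default-walk shape, using a dummy tree type rather than interpreter values:

// Toy stand-in for the hook-plus-default-walk structure; `Value` is a dummy
// tree, not an interpreter value.
#[derive(Debug)]
enum Value {
    Leaf(i64),
    Struct(Vec<Value>),
}

trait ValueVisitor: Sized {
    // Hook: called for every value; defaults to plain recursion.
    fn visit_value(&mut self, v: &Value) {
        self.walk_value(v)
    }
    // Hook: called for each field of an aggregate before recursing into it.
    fn visit_field(&mut self, _parent: &Value, _idx: usize, field: &Value) {
        self.visit_value(field)
    }
    // Default traversal, not meant to be overridden.
    fn walk_value(&mut self, v: &Value) {
        if let Value::Struct(fields) = v {
            for (idx, field) in fields.iter().enumerate() {
                self.visit_field(v, idx, field);
            }
        }
    }
}

// Example visitor: sum all leaves while keeping the default traversal.
struct LeafSum(i64);

impl ValueVisitor for LeafSum {
    fn visit_value(&mut self, v: &Value) {
        if let Value::Leaf(n) = v {
            self.0 += n;
        }
        self.walk_value(v)
    }
}

fn main() {
    let v = Value::Struct(vec![
        Value::Leaf(1),
        Value::Struct(vec![Value::Leaf(2), Value::Leaf(3)]),
    ]);
    let mut visitor = LeafSum(0);
    visitor.visit_value(&v);
    println!("sum of leaves = {}", visitor.0); // prints 6
}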
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index 14540e8df..fae047bff 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -8,8 +8,9 @@ use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
-use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
+use rustc_middle::traits::BuiltinImplSource;
use rustc_middle::ty::{self, adjustment::PointerCoercion, Instance, InstanceDef, Ty, TyCtxt};
+use rustc_middle::ty::{GenericArgKind, GenericArgs};
use rustc_middle::ty::{TraitRef, TypeVisitableExt};
use rustc_mir_dataflow::{self, Analysis};
use rustc_span::{sym, Span, Symbol};
@@ -20,7 +21,7 @@ use std::mem;
use std::ops::Deref;
use super::ops::{self, NonConstOp, Status};
-use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
+use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop};
use super::resolver::FlowSensitiveAnalysis;
use super::{ConstCx, Qualif};
use crate::const_eval::is_unstable_const_fn;
@@ -33,7 +34,7 @@ type QualifResults<'mir, 'tcx, Q> =
pub struct Qualifs<'mir, 'tcx> {
has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
- needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
+ // needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
}
impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
@@ -76,15 +77,17 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
local: Local,
location: Location,
) -> bool {
+ // FIXME(effects) replace with `NeedsNonConstDrop` after const traits work again
+ /*
let ty = ccx.body.local_decls[local].ty;
- if !NeedsNonConstDrop::in_any_value_of_ty(ccx, ty) {
+ if !NeedsDrop::in_any_value_of_ty(ccx, ty) {
return false;
}
let needs_non_const_drop = self.needs_non_const_drop.get_or_insert_with(|| {
let ConstCx { tcx, body, .. } = *ccx;
- FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx)
+ FlowSensitiveAnalysis::new(NeedsDrop, ccx)
.into_engine(tcx, &body)
.iterate_to_fixpoint()
.into_results_cursor(&body)
@@ -92,6 +95,9 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
needs_non_const_drop.seek_before_primary_effect(location);
needs_non_const_drop.get().contains(local)
+ */
+
+ self.needs_drop(ccx, local, location)
}
/// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
@@ -701,8 +707,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let fn_ty = func.ty(body, tcx);
- let (mut callee, mut substs) = match *fn_ty.kind() {
- ty::FnDef(def_id, substs) => (def_id, substs),
+ let (mut callee, mut fn_args) = match *fn_ty.kind() {
+ ty::FnDef(def_id, fn_args) => (def_id, fn_args),
ty::FnPtr(_) => {
self.check_op(ops::FnCallIndirect);
@@ -721,7 +727,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let infcx = tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new(&infcx);
- let predicates = tcx.predicates_of(callee).instantiate(tcx, substs);
+ let predicates = tcx.predicates_of(callee).instantiate(tcx, fn_args);
let cause = ObligationCause::new(
terminator.source_info.span,
self.body.source.def_id().expect_local(),
@@ -740,13 +746,14 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
}
// Attempting to call a trait method?
+ // FIXME(effects) do we need this?
if let Some(trait_id) = tcx.trait_of_item(callee) {
trace!("attempting to call a trait method");
if !self.tcx.features().const_trait_impl {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: Some(sym::const_trait_impl),
@@ -754,8 +761,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
return;
}
- let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
- let trait_ref = trait_ref.with_constness(ty::BoundConstness::ConstIfConst);
+ let trait_ref = TraitRef::from_method(tcx, trait_id, fn_args);
let obligation =
Obligation::new(tcx, ObligationCause::dummy(), param_env, trait_ref);
@@ -766,7 +772,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
};
match implsrc {
- Ok(Some(ImplSource::Param(_, ty::BoundConstness::ConstIfConst))) => {
+ Ok(Some(ImplSource::Param(_))) if tcx.features().effects => {
debug!(
"const_trait_impl: provided {:?} via where-clause in {:?}",
trait_ref, param_env
@@ -774,12 +780,11 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
return;
}
// Closure: Fn{Once|Mut}
- Ok(Some(ImplSource::Builtin(_)))
+ Ok(Some(ImplSource::Builtin(BuiltinImplSource::Misc, _)))
if trait_ref.self_ty().is_closure()
&& tcx.fn_trait_kind_from_def_id(trait_id).is_some() =>
{
- let ty::Closure(closure_def_id, substs) =
- *trait_ref.self_ty().kind()
+ let ty::Closure(closure_def_id, fn_args) = *trait_ref.self_ty().kind()
else {
unreachable!()
};
@@ -787,7 +792,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: None,
@@ -798,28 +803,29 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
}
Ok(Some(ImplSource::UserDefined(data))) => {
let callee_name = tcx.item_name(callee);
- if let Some(&did) = tcx
- .associated_item_def_ids(data.impl_def_id)
- .iter()
- .find(|did| tcx.item_name(**did) == callee_name)
- {
- // using internal substs is ok here, since this is only
- // used for the `resolve` call below
- substs = InternalSubsts::identity_for_item(tcx, did);
- callee = did;
- }
if let hir::Constness::NotConst = tcx.constness(data.impl_def_id) {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: None,
});
return;
}
+
+ if let Some(&did) = tcx
+ .associated_item_def_ids(data.impl_def_id)
+ .iter()
+ .find(|did| tcx.item_name(**did) == callee_name)
+ {
+ // using internal args is ok here, since this is only
+ // used for the `resolve` call below
+ fn_args = GenericArgs::identity_for_item(tcx, did);
+ callee = did;
+ }
}
_ if !tcx.is_const_fn_raw(callee) => {
// At this point, it is only legal when the caller is in a trait
@@ -829,7 +835,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
&& tcx.has_attr(callee_trait, sym::const_trait)
&& Some(callee_trait) == tcx.trait_of_item(caller.to_def_id())
// Can only call methods when it's `<Self as TheTrait>::f`.
- && tcx.types.self_param == substs.type_at(0)
+ && tcx.types.self_param == fn_args.type_at(0)
{
nonconst_call_permission = true;
}
@@ -856,7 +862,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: None,
@@ -869,7 +875,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// Resolve a trait method call to its concrete implementation, which may be in a
// `const` trait impl.
- let instance = Instance::resolve(tcx, param_env, callee, substs);
+ let instance = Instance::resolve(tcx, param_env, callee, fn_args);
debug!("Resolving ({:?}) -> {:?}", callee, instance);
if let Ok(Some(func)) = instance {
if let InstanceDef::Item(def) = func.def {
@@ -916,7 +922,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: None,
@@ -996,8 +1002,9 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let mut err_span = self.span;
let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty;
+ // FIXME(effects) replace with `NeedsNonConstDrop` once we fix const traits
let ty_needs_non_const_drop =
- qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
+ qualifs::NeedsDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop);
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
index 8ebfee887..e51082e1e 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -68,11 +68,11 @@ impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
pub fn fn_sig(&self) -> PolyFnSig<'tcx> {
let did = self.def_id().to_def_id();
if self.tcx.is_closure(did) {
- let ty = self.tcx.type_of(did).subst_identity();
- let ty::Closure(_, substs) = ty.kind() else { bug!("type_of closure not ty::Closure") };
- substs.as_closure().sig()
+ let ty = self.tcx.type_of(did).instantiate_identity();
+ let ty::Closure(_, args) = ty.kind() else { bug!("type_of closure not ty::Closure") };
+ args.as_closure().sig()
} else {
- self.tcx.fn_sig(did).subst_identity()
+ self.tcx.fn_sig(did).instantiate_identity()
}
}
}
@@ -127,15 +127,8 @@ fn is_parent_const_stable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
let hir_id = tcx.local_def_id_to_hir_id(local_def_id);
let Some(parent) = tcx.hir().opt_parent_id(hir_id) else { return false };
- let parent_def = tcx.hir().get(parent);
-
- if !matches!(
- parent_def,
- hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
- ..
- })
- ) {
+
+ if !tcx.is_const_trait_impl_raw(parent.owner.def_id.to_def_id()) {
return false;
}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
index 4eb278252..1f3cda35c 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -9,9 +9,9 @@ use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
use rustc_middle::mir::{self, CallSource};
use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::TraitRef;
use rustc_middle::ty::{suggest_constraining_type_param, Adt, Closure, FnDef, FnPtr, Param, Ty};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
use rustc_middle::util::{call_kind, CallDesugaringKind, CallKind};
use rustc_session::parse::feature_err;
use rustc_span::symbol::sym;
@@ -98,7 +98,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
pub struct FnCallNonConst<'tcx> {
pub caller: LocalDefId,
pub callee: DefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
pub span: Span,
pub call_source: CallSource,
pub feature: Option<Symbol>,
@@ -110,11 +110,11 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
ccx: &ConstCx<'_, 'tcx>,
_: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let FnCallNonConst { caller, callee, substs, span, call_source, feature } = *self;
+ let FnCallNonConst { caller, callee, args, span, call_source, feature } = *self;
let ConstCx { tcx, param_env, .. } = *ccx;
let diag_trait = |err, self_ty: Ty<'_>, trait_id| {
- let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
+ let trait_ref = TraitRef::from_method(tcx, trait_id, args);
match self_ty.kind() {
Param(param_ty) => {
@@ -145,8 +145,11 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
let implsrc = selcx.select(&obligation);
if let Ok(Some(ImplSource::UserDefined(data))) = implsrc {
- let span = tcx.def_span(data.impl_def_id);
- err.subdiagnostic(errors::NonConstImplNote { span });
+ // FIXME(effects) revisit this
+ if !tcx.is_const_trait_impl_raw(data.impl_def_id) {
+ let span = tcx.def_span(data.impl_def_id);
+ err.subdiagnostic(errors::NonConstImplNote { span });
+ }
}
}
_ => {}
@@ -154,7 +157,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
};
let call_kind =
- call_kind(tcx, ccx.param_env, callee, substs, span, call_source.from_hir_call(), None);
+ call_kind(tcx, ccx.param_env, callee, args, span, call_source.from_hir_call(), None);
debug!(?call_kind);
@@ -226,7 +229,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
let mut sugg = None;
if Some(trait_id) == ccx.tcx.lang_items().eq_trait() {
- match (substs[0].unpack(), substs[1].unpack()) {
+ match (args[0].unpack(), args[1].unpack()) {
(GenericArgKind::Type(self_ty), GenericArgKind::Type(rhs_ty))
if self_ty == rhs_ty
&& self_ty.is_ref()
@@ -297,7 +300,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
.create_err(errors::NonConstFmtMacroCall { span, kind: ccx.const_kind() }),
_ => ccx.tcx.sess.create_err(errors::NonConstFnCall {
span,
- def_path_str: ccx.tcx.def_path_str_with_substs(callee, substs),
+ def_path_str: ccx.tcx.def_path_str_with_args(callee, args),
kind: ccx.const_kind(),
}),
};
@@ -310,8 +313,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
if let Some(feature) = feature && ccx.tcx.sess.is_nightly_build() {
err.help(format!(
- "add `#![feature({})]` to the crate attributes to enable",
- feature,
+ "add `#![feature({feature})]` to the crate attributes to enable",
));
}
@@ -346,10 +348,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
err.help("const-stable functions can only call other const-stable functions");
} else if ccx.tcx.sess.is_nightly_build() {
if let Some(feature) = feature {
- err.help(format!(
- "add `#![feature({})]` to the crate attributes to enable",
- feature
- ));
+ err.help(format!("add `#![feature({feature})]` to the crate attributes to enable"));
}
}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
index 1f1640fd8..e3377bd10 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -5,7 +5,7 @@ use rustc_span::{symbol::sym, Span};
use super::check::Qualifs;
use super::ops::{self, NonConstOp};
-use super::qualifs::{NeedsNonConstDrop, Qualif};
+use super::qualifs::{NeedsDrop, Qualif};
use super::ConstCx;
/// Returns `true` if we should use the more precise live drop checker that runs after drop
@@ -82,7 +82,9 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
match &terminator.kind {
mir::TerminatorKind::Drop { place: dropped_place, .. } => {
let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
- if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
+
+ // FIXME(effects) use `NeedsNonConstDrop`
+ if !NeedsDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
// Instead of throwing a bug, we just return here. This is because we have to
// run custom `const Drop` impls.
return;
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index 015a4aa94..b1b2859ef 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -7,7 +7,8 @@ use rustc_hir::LangItem;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::mir;
use rustc_middle::mir::*;
-use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty};
+use rustc_middle::traits::BuiltinImplSource;
+use rustc_middle::ty::{self, AdtDef, GenericArgsRef, Ty};
use rustc_trait_selection::traits::{
self, ImplSource, Obligation, ObligationCause, ObligationCtxt, SelectionContext,
};
@@ -22,7 +23,8 @@ pub fn in_any_value_of_ty<'tcx>(
ConstQualifs {
has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
- needs_non_const_drop: NeedsNonConstDrop::in_any_value_of_ty(cx, ty),
+ // FIXME(effects)
+ needs_non_const_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
custom_eq: CustomEq::in_any_value_of_ty(cx, ty),
tainted_by_errors,
}
@@ -72,7 +74,7 @@ pub trait Qualif {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
adt: AdtDef<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> bool;
}
@@ -97,7 +99,7 @@ impl Qualif for HasMutInterior {
fn in_adt_inherently<'tcx>(
_cx: &ConstCx<'_, 'tcx>,
adt: AdtDef<'tcx>,
- _: SubstsRef<'tcx>,
+ _: GenericArgsRef<'tcx>,
) -> bool {
// Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
// It arises structurally for all other types.
@@ -127,7 +129,7 @@ impl Qualif for NeedsDrop {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
adt: AdtDef<'tcx>,
- _: SubstsRef<'tcx>,
+ _: GenericArgsRef<'tcx>,
) -> bool {
adt.has_dtor(cx.tcx)
}
@@ -153,12 +155,12 @@ impl Qualif for NeedsNonConstDrop {
return false;
}
+ // FIXME(effects) constness
let obligation = Obligation::new(
cx.tcx,
ObligationCause::dummy_with_span(cx.body.span),
cx.param_env,
- ty::TraitRef::from_lang_item(cx.tcx, LangItem::Destruct, cx.body.span, [ty])
- .with_constness(ty::BoundConstness::ConstIfConst),
+ ty::TraitRef::from_lang_item(cx.tcx, LangItem::Destruct, cx.body.span, [ty]),
);
let infcx = cx.tcx.infer_ctxt().build();
@@ -172,7 +174,7 @@ impl Qualif for NeedsNonConstDrop {
if !matches!(
impl_src,
- ImplSource::Builtin(_) | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
+ ImplSource::Builtin(BuiltinImplSource::Misc, _) | ImplSource::Param(_)
) {
// If our const destruct candidate is not ConstDestruct or implied by the param env,
// then it's bad
@@ -193,7 +195,7 @@ impl Qualif for NeedsNonConstDrop {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
adt: AdtDef<'tcx>,
- _: SubstsRef<'tcx>,
+ _: GenericArgsRef<'tcx>,
) -> bool {
adt.has_non_const_dtor(cx.tcx)
}
@@ -221,9 +223,9 @@ impl Qualif for CustomEq {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
def: AdtDef<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> bool {
- let ty = Ty::new_adt(cx.tcx, def, substs);
+ let ty = Ty::new_adt(cx.tcx, def, args);
!ty.is_structural_eq_shallow(cx.tcx)
}
}
@@ -276,9 +278,9 @@ where
Rvalue::Aggregate(kind, operands) => {
// Return early if we know that the struct or enum being constructed is always
// qualified.
- if let AggregateKind::Adt(adt_did, _, substs, ..) = **kind {
+ if let AggregateKind::Adt(adt_did, _, args, ..) = **kind {
let def = cx.tcx.adt_def(adt_did);
- if Q::in_adt_inherently(cx, def, substs) {
+ if Q::in_adt_inherently(cx, def, args) {
return true;
}
if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) {
@@ -360,7 +362,7 @@ where
ConstantKind::Val(..) => None,
};
- if let Some(mir::UnevaluatedConst { def, substs: _, promoted }) = uneval {
+ if let Some(mir::UnevaluatedConst { def, args: _, promoted }) = uneval {
// Use qualifs of the type for the promoted. Promoteds in MIR body should be possible
// only for `NeedsNonConstDrop` with precise drop checking. This is the only const
// check performed after the promotion. Verify that with an assertion.
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
index 3a869f7f5..a137f84b7 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -4,10 +4,12 @@
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::Visitor;
-use rustc_middle::mir::{self, BasicBlock, Local, Location, Statement, StatementKind};
+use rustc_middle::mir::{
+ self, BasicBlock, CallReturnPlaces, Local, Location, Statement, StatementKind, TerminatorEdges,
+};
use rustc_mir_dataflow::fmt::DebugWithContext;
use rustc_mir_dataflow::JoinSemiLattice;
-use rustc_mir_dataflow::{Analysis, AnalysisDomain, CallReturnPlaces};
+use rustc_mir_dataflow::{Analysis, AnalysisDomain};
use std::fmt;
use std::marker::PhantomData;
@@ -345,13 +347,14 @@ where
self.transfer_function(state).visit_statement(statement, location);
}
- fn apply_terminator_effect(
+ fn apply_terminator_effect<'mir>(
&mut self,
state: &mut Self::Domain,
- terminator: &mir::Terminator<'tcx>,
+ terminator: &'mir mir::Terminator<'tcx>,
location: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
self.transfer_function(state).visit_terminator(terminator, location);
+ terminator.edges()
}
fn apply_call_return_effect(
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index 1b39a76e4..d79c65f1d 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -16,7 +16,7 @@ use rustc_hir as hir;
use rustc_middle::mir;
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
-use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{self, List, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::Span;
@@ -759,11 +759,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let (mut rvalue, source_info) = {
let statement = &mut self.source[loc.block].statements[loc.statement_index];
let StatementKind::Assign(box (_, rhs)) = &mut statement.kind else {
- span_bug!(
- statement.source_info.span,
- "{:?} is not an assignment",
- statement
- );
+ span_bug!(statement.source_info.span, "{:?} is not an assignment", statement);
};
(
@@ -845,8 +841,8 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let mut promoted_operand = |ty, span| {
promoted.span = span;
promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span);
- let substs = tcx.erase_regions(InternalSubsts::identity_for_item(tcx, def));
- let uneval = mir::UnevaluatedConst { def, substs, promoted: Some(promoted_id) };
+ let args = tcx.erase_regions(GenericArgs::identity_for_item(tcx, def));
+ let uneval = mir::UnevaluatedConst { def, args, promoted: Some(promoted_id) };
Operand::Constant(Box::new(Constant {
span,
@@ -859,7 +855,9 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let local_decls = &mut self.source.local_decls;
let loc = candidate.location;
let statement = &mut blocks[loc.block].statements[loc.statement_index];
- let StatementKind::Assign(box (_, Rvalue::Ref(region, borrow_kind, place))) = &mut statement.kind else {
+ let StatementKind::Assign(box (_, Rvalue::Ref(region, borrow_kind, place))) =
+ &mut statement.kind
+ else {
bug!()
};
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index 4cc923cd9..783b52d00 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -18,6 +18,7 @@ use rustc_mir_dataflow::impls::MaybeStorageLive;
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_mir_dataflow::{Analysis, ResultsCursor};
use rustc_target::abi::{Size, FIRST_VARIANT};
+use rustc_target::spec::abi::Abi;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum EdgeKind {
@@ -58,25 +59,48 @@ impl<'tcx> MirPass<'tcx> for Validator {
.iterate_to_fixpoint()
.into_results_cursor(body);
- let mut checker = TypeChecker {
+ let can_unwind = if mir_phase <= MirPhase::Runtime(RuntimePhase::Initial) {
+ // In this case `AbortUnwindingCalls` haven't yet been executed.
+ true
+ } else if !tcx.def_kind(def_id).is_fn_like() {
+ true
+ } else {
+ let body_ty = tcx.type_of(def_id).skip_binder();
+ let body_abi = match body_ty.kind() {
+ ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
+ ty::Closure(..) => Abi::RustCall,
+ ty::Generator(..) => Abi::Rust,
+ _ => {
+ span_bug!(body.span, "unexpected body ty: {:?} phase {:?}", body_ty, mir_phase)
+ }
+ };
+
+ ty::layout::fn_can_unwind(tcx, Some(def_id), body_abi)
+ };
+
+ let mut cfg_checker = CfgChecker {
when: &self.when,
body,
tcx,
- param_env,
mir_phase,
unwind_edge_count: 0,
reachable_blocks: traversal::reachable_as_bitset(body),
storage_liveness,
place_cache: FxHashSet::default(),
value_cache: FxHashSet::default(),
+ can_unwind,
};
- checker.visit_body(body);
- checker.check_cleanup_control_flow();
+ cfg_checker.visit_body(body);
+ cfg_checker.check_cleanup_control_flow();
+
+ for (location, msg) in validate_types(tcx, self.mir_phase, param_env, body) {
+ cfg_checker.fail(location, msg);
+ }
if let MirPhase::Runtime(_) = body.phase {
if let ty::InstanceDef::Item(_) = body.source.instance {
if body.has_free_regions() {
- checker.fail(
+ cfg_checker.fail(
Location::START,
format!("Free regions in optimized {} MIR", body.phase.name()),
);
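
The hunk above precomputes whether the body's ABI permits unwinding before the CFG walk, via `ty::layout::fn_can_unwind`. As a rough, standalone illustration of that decision (a toy `Abi` enum and function of my own, not rustc's, which handles many more ABIs and the panic strategy):

    // Hypothetical, simplified stand-in for the ABI-based unwind check above.
    #[derive(Clone, Copy)]
    enum Abi {
        Rust,
        RustCall,
        C { unwind: bool },
    }

    fn fn_can_unwind(abi: Abi) -> bool {
        match abi {
            // Plain Rust functions and rust-call closures may unwind.
            Abi::Rust | Abi::RustCall => true,
            // `extern "C"` only unwinds when the `-unwind` variant is used.
            Abi::C { unwind } => unwind,
        }
    }

    fn main() {
        assert!(fn_can_unwind(Abi::Rust));
        assert!(!fn_can_unwind(Abi::C { unwind: false }));
    }
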
@@ -86,20 +110,22 @@ impl<'tcx> MirPass<'tcx> for Validator {
}
}
-struct TypeChecker<'a, 'tcx> {
+struct CfgChecker<'a, 'tcx> {
when: &'a str,
body: &'a Body<'tcx>,
tcx: TyCtxt<'tcx>,
- param_env: ParamEnv<'tcx>,
mir_phase: MirPhase,
unwind_edge_count: usize,
reachable_blocks: BitSet<BasicBlock>,
storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive<'static>>,
place_cache: FxHashSet<PlaceRef<'tcx>>,
value_cache: FxHashSet<u128>,
+ // If `false`, then the MIR must not contain `UnwindAction::Continue` or
+ // `TerminatorKind::Resume`.
+ can_unwind: bool,
}
-impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
#[track_caller]
fn fail(&self, location: Location, msg: impl AsRef<str>) {
let span = self.body.source_info(location).span;
@@ -147,7 +173,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
} else {
- self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
+ self.fail(location, format!("encountered jump to invalid basic block {bb:?}"))
}
}
@@ -214,16 +240,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
stack.clear();
stack.insert(bb);
loop {
- let Some(parent)= parent[bb].take() else {
- break
- };
+ let Some(parent) = parent[bb].take() else { break };
let no_cycle = stack.insert(parent);
if !no_cycle {
self.fail(
Location { block: bb, statement_index: 0 },
format!(
- "Cleanup control flow violation: Cycle involving edge {:?} -> {:?}",
- bb, parent,
+ "Cleanup control flow violation: Cycle involving edge {bb:?} -> {parent:?}",
),
);
break;
@@ -238,47 +261,30 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
match unwind {
UnwindAction::Cleanup(unwind) => {
if is_cleanup {
- self.fail(location, "unwind on cleanup block");
+ self.fail(location, "`UnwindAction::Cleanup` in cleanup block");
}
self.check_edge(location, unwind, EdgeKind::Unwind);
}
UnwindAction::Continue => {
if is_cleanup {
- self.fail(location, "unwind on cleanup block");
+ self.fail(location, "`UnwindAction::Continue` in cleanup block");
+ }
+
+ if !self.can_unwind {
+ self.fail(location, "`UnwindAction::Continue` in no-unwind function");
}
}
UnwindAction::Unreachable | UnwindAction::Terminate => (),
}
}
-
- /// Check if src can be assigned into dest.
- /// This is not precise, it will accept some incorrect assignments.
- fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
- // Fast path before we normalize.
- if src == dest {
- // Equal types, all is good.
- return true;
- }
-
- // We sometimes have to use `defining_opaque_types` for subtyping
- // to succeed here and figuring out how exactly that should work
- // is annoying. It is harmless enough to just not validate anything
- // in that case. We still check this after analysis as all opaque
- // types have been revealed at this point.
- if (src, dest).has_opaque_types() {
- return true;
- }
-
- crate::util::is_subtype(self.tcx, self.param_env, src, dest)
- }
}
-impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
if self.body.local_decls.get(local).is_none() {
self.fail(
location,
- format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
+ format!("local {local:?} has no corresponding declaration in `body.local_decls`"),
);
}
@@ -293,11 +299,286 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.storage_liveness.seek_after_primary_effect(location);
let locals_with_storage = self.storage_liveness.get();
if !locals_with_storage.contains(local) {
- self.fail(location, format!("use of local {:?}, which has no storage here", local));
+ self.fail(location, format!("use of local {local:?}, which has no storage here"));
+ }
+ }
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ match &statement.kind {
+ StatementKind::Assign(box (dest, rvalue)) => {
+ // FIXME(JakobDegen): Check this for all rvalues, not just this one.
+ if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue {
+ // The sides of an assignment must not alias. Currently this just checks whether
+ // the places are identical.
+ if dest == src {
+ self.fail(
+ location,
+ "encountered `Assign` statement with overlapping memory",
+ );
+ }
+ }
+ }
+ StatementKind::AscribeUserType(..) => {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`AscribeUserType` should have been removed after drop lowering phase",
+ );
+ }
+ }
+ StatementKind::FakeRead(..) => {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`FakeRead` should have been removed after drop lowering phase",
+ );
+ }
+ }
+ StatementKind::SetDiscriminant { .. } => {
+ if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
+ }
+ }
+ StatementKind::Deinit(..) => {
+ if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(location, "`Deinit`is not allowed until deaggregation");
+ }
+ }
+ StatementKind::Retag(kind, _) => {
+ // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+ // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+ // seem to fail to set their `MirPhase` correctly.
+ if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
+ self.fail(location, format!("explicit `{kind:?}` is forbidden"));
+ }
+ }
+ StatementKind::StorageLive(local) => {
+ // We check that the local is not live when entering a `StorageLive` for it.
+ // Technically, violating this restriction is only UB and not actually indicative
+ // of not well-formed MIR. This means that an optimization which turns MIR that
+ // already has UB into MIR that fails this check is not necessarily wrong. However,
+ // we have no such optimizations at the moment, and so we include this check anyway
+ // to help us catch bugs. If you happen to write an optimization that might cause
+ // this to incorrectly fire, feel free to remove this check.
+ if self.reachable_blocks.contains(location.block) {
+ self.storage_liveness.seek_before_primary_effect(location);
+ let locals_with_storage = self.storage_liveness.get();
+ if locals_with_storage.contains(*local) {
+ self.fail(
+ location,
+ format!("StorageLive({local:?}) which already has storage here"),
+ );
+ }
+ }
+ }
+ StatementKind::StorageDead(_)
+ | StatementKind::Intrinsic(_)
+ | StatementKind::Coverage(_)
+ | StatementKind::ConstEvalCounter
+ | StatementKind::PlaceMention(..)
+ | StatementKind::Nop => {}
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ match &terminator.kind {
+ TerminatorKind::Goto { target } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ }
+ TerminatorKind::SwitchInt { targets, discr: _ } => {
+ for (_, target) in targets.iter() {
+ self.check_edge(location, target, EdgeKind::Normal);
+ }
+ self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
+
+ self.value_cache.clear();
+ self.value_cache.extend(targets.iter().map(|(value, _)| value));
+ let has_duplicates = targets.iter().len() != self.value_cache.len();
+ if has_duplicates {
+ self.fail(
+ location,
+ format!(
+ "duplicated values in `SwitchInt` terminator: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
+ }
+ TerminatorKind::Drop { target, unwind, .. } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ self.check_unwind_edge(location, *unwind);
+ }
+ TerminatorKind::Call { args, destination, target, unwind, .. } => {
+ if let Some(target) = target {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ }
+ self.check_unwind_edge(location, *unwind);
+
+ // The call destination place and Operand::Move place used as an argument might be
+ // passed by a reference to the callee. Consequently they must be non-overlapping.
+ // Currently this simply checks for duplicate places.
+ self.place_cache.clear();
+ self.place_cache.insert(destination.as_ref());
+ let mut has_duplicates = false;
+ for arg in args {
+ if let Operand::Move(place) = arg {
+ has_duplicates |= !self.place_cache.insert(place.as_ref());
+ }
+ }
+
+ if has_duplicates {
+ self.fail(
+ location,
+ format!(
+ "encountered overlapping memory in `Call` terminator: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
+ }
+ TerminatorKind::Assert { target, unwind, .. } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ self.check_unwind_edge(location, *unwind);
+ }
+ TerminatorKind::Yield { resume, drop, .. } => {
+ if self.body.generator.is_none() {
+ self.fail(location, "`Yield` cannot appear outside generator bodies");
+ }
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(location, "`Yield` should have been replaced by generator lowering");
+ }
+ self.check_edge(location, *resume, EdgeKind::Normal);
+ if let Some(drop) = drop {
+ self.check_edge(location, *drop, EdgeKind::Normal);
+ }
+ }
+ TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`FalseEdge` should have been removed after drop elaboration",
+ );
+ }
+ self.check_edge(location, *real_target, EdgeKind::Normal);
+ self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+ }
+ TerminatorKind::FalseUnwind { real_target, unwind } => {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`FalseUnwind` should have been removed after drop elaboration",
+ );
+ }
+ self.check_edge(location, *real_target, EdgeKind::Normal);
+ self.check_unwind_edge(location, *unwind);
+ }
+ TerminatorKind::InlineAsm { destination, unwind, .. } => {
+ if let Some(destination) = destination {
+ self.check_edge(location, *destination, EdgeKind::Normal);
+ }
+ self.check_unwind_edge(location, *unwind);
+ }
+ TerminatorKind::GeneratorDrop => {
+ if self.body.generator.is_none() {
+ self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
+ }
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`GeneratorDrop` should have been replaced by generator lowering",
+ );
+ }
+ }
+ TerminatorKind::Resume => {
+ let bb = location.block;
+ if !self.body.basic_blocks[bb].is_cleanup {
+ self.fail(location, "Cannot `Resume` from non-cleanup basic block")
+ }
+ if !self.can_unwind {
+ self.fail(location, "Cannot `Resume` in a function that cannot unwind")
+ }
+ }
+ TerminatorKind::Terminate => {
+ let bb = location.block;
+ if !self.body.basic_blocks[bb].is_cleanup {
+ self.fail(location, "Cannot `Terminate` from non-cleanup basic block")
+ }
+ }
+ TerminatorKind::Return => {
+ let bb = location.block;
+ if self.body.basic_blocks[bb].is_cleanup {
+ self.fail(location, "Cannot `Return` from cleanup basic block")
+ }
}
+ TerminatorKind::Unreachable => {}
+ }
+
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_source_scope(&mut self, scope: SourceScope) {
+ if self.body.source_scopes.get(scope).is_none() {
+ self.tcx.sess.diagnostic().delay_span_bug(
+ self.body.span,
+ format!(
+ "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+ self.body.source.instance, self.when, scope,
+ ),
+ );
}
}
+}
+
+pub fn validate_types<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mir_phase: MirPhase,
+ param_env: ty::ParamEnv<'tcx>,
+ body: &Body<'tcx>,
+) -> Vec<(Location, String)> {
+ let mut type_checker = TypeChecker { body, tcx, param_env, mir_phase, failures: Vec::new() };
+ type_checker.visit_body(body);
+ type_checker.failures
+}
+
+struct TypeChecker<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ mir_phase: MirPhase,
+ failures: Vec<(Location, String)>,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+ fn fail(&mut self, location: Location, msg: impl Into<String>) {
+ self.failures.push((location, msg.into()));
+ }
+
+ /// Check if src can be assigned into dest.
+ /// This is not precise, it will accept some incorrect assignments.
+ fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+ // Fast path before we normalize.
+ if src == dest {
+ // Equal types, all is good.
+ return true;
+ }
+ // We sometimes have to use `defining_opaque_types` for subtyping
+ // to succeed here and figuring out how exactly that should work
+ // is annoying. It is harmless enough to just not validate anything
+ // in that case. We still check this after analysis as all opaque
+ // types have been revealed at this point.
+ if (src, dest).has_opaque_types() {
+ return true;
+ }
+
+ crate::util::is_subtype(self.tcx, self.param_env, src, dest)
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
// This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
if self.tcx.sess.opts.unstable_opts.validate_mir
@@ -308,7 +589,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
let ty = place.ty(&self.body.local_decls, self.tcx).ty;
if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
- self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+ self.fail(location, format!("`Operand::Copy` with non-`Copy` type {ty}"));
}
}
}
@@ -327,7 +608,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
ProjectionElem::Index(index) => {
let index_ty = self.body.local_decls[index].ty;
if index_ty != self.tcx.types.usize {
- self.fail(location, format!("bad index ({:?} != usize)", index_ty))
+ self.fail(location, format!("bad index ({index_ty:?} != usize)"))
}
}
ProjectionElem::Deref
@@ -338,30 +619,29 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
if base_ty.is_box() {
self.fail(
location,
- format!("{:?} dereferenced after ElaborateBoxDerefs", base_ty),
+ format!("{base_ty:?} dereferenced after ElaborateBoxDerefs"),
)
}
}
ProjectionElem::Field(f, ty) => {
let parent_ty = place_ref.ty(&self.body.local_decls, self.tcx);
- let fail_out_of_bounds = |this: &Self, location| {
- this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
+ let fail_out_of_bounds = |this: &mut Self, location| {
+ this.fail(location, format!("Out of bounds field {f:?} for {parent_ty:?}"));
};
- let check_equal = |this: &Self, location, f_ty| {
+ let check_equal = |this: &mut Self, location, f_ty| {
if !this.mir_assign_valid_types(ty, f_ty) {
this.fail(
location,
format!(
- "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is `{:?}`",
- place_ref, f, ty, f_ty
+ "Field projection `{place_ref:?}.{f:?}` specified type `{ty:?}`, but actual type is `{f_ty:?}`"
)
)
}
};
let kind = match parent_ty.ty.kind() {
- &ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
- self.tcx.type_of(def_id).subst(self.tcx, substs).kind()
+ &ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
+ self.tcx.type_of(def_id).instantiate(self.tcx, args).kind()
}
kind => kind,
};
@@ -374,23 +654,23 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
};
check_equal(self, location, *f_ty);
}
- ty::Adt(adt_def, substs) => {
+ ty::Adt(adt_def, args) => {
let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT);
let Some(field) = adt_def.variant(var).fields.get(f) else {
fail_out_of_bounds(self, location);
return;
};
- check_equal(self, location, field.ty(self.tcx, substs));
+ check_equal(self, location, field.ty(self.tcx, args));
}
- ty::Closure(_, substs) => {
- let substs = substs.as_closure();
- let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
+ ty::Closure(_, args) => {
+ let args = args.as_closure();
+ let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
fail_out_of_bounds(self, location);
return;
};
check_equal(self, location, f_ty);
}
- &ty::Generator(def_id, substs, _) => {
+ &ty::Generator(def_id, args, _) => {
let f_ty = if let Some(var) = parent_ty.variant_index {
let gen_body = if def_id == self.body.source.def_id() {
self.body
@@ -399,7 +679,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
};
let Some(layout) = gen_body.generator_layout() else {
- self.fail(location, format!("No generator layout for {:?}", parent_ty));
+ self.fail(
+ location,
+ format!("No generator layout for {parent_ty:?}"),
+ );
return;
};
@@ -409,13 +692,17 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
};
let Some(f_ty) = layout.field_tys.get(local) else {
- self.fail(location, format!("Out of bounds local {:?} for {:?}", local, parent_ty));
+ self.fail(
+ location,
+ format!("Out of bounds local {local:?} for {parent_ty:?}"),
+ );
return;
};
- f_ty.ty
+ ty::EarlyBinder::bind(f_ty.ty).instantiate(self.tcx, args)
} else {
- let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
+ let Some(&f_ty) = args.as_generator().prefix_tys().get(f.index())
+ else {
fail_out_of_bounds(self, location);
return;
};
@@ -436,9 +723,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
fn visit_var_debug_info(&mut self, debuginfo: &VarDebugInfo<'tcx>) {
- let check_place = |place: Place<'_>| {
+ let check_place = |this: &mut Self, place: Place<'_>| {
if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) {
- self.fail(
+ this.fail(
START_BLOCK.start_location(),
format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name),
);
@@ -447,21 +734,15 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
match debuginfo.value {
VarDebugInfoContents::Const(_) => {}
VarDebugInfoContents::Place(place) => {
- check_place(place);
- if debuginfo.references != 0 && place.projection.last() == Some(&PlaceElem::Deref) {
- self.fail(
- START_BLOCK.start_location(),
- format!("debuginfo {:?}, has both ref and deref", debuginfo),
- );
- }
+ check_place(self, place);
}
VarDebugInfoContents::Composite { ty, ref fragments } => {
for f in fragments {
- check_place(f.contents);
+ check_place(self, f.contents);
if ty.is_union() || ty.is_enum() {
self.fail(
START_BLOCK.start_location(),
- format!("invalid type {:?} for composite debuginfo", ty),
+ format!("invalid type {ty:?} for composite debuginfo"),
);
}
if f.projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) {
@@ -488,7 +769,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
&& cntxt != PlaceContext::NonUse(NonUseContext::VarDebugInfo)
&& place.projection[1..].contains(&ProjectionElem::Deref)
{
- self.fail(location, format!("{:?}, has deref at the wrong place", place));
+ self.fail(location, format!("{place:?}, has deref at the wrong place"));
}
self.super_place(place, cntxt, location);
@@ -548,7 +829,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
Offset => {
check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
if b != self.tcx.types.isize && b != self.tcx.types.usize {
- self.fail(location, format!("Cannot offset by non-isize type {:?}", b));
+ self.fail(location, format!("Cannot offset by non-isize type {b:?}"));
}
}
Eq | Lt | Le | Ne | Ge | Gt => {
@@ -613,13 +894,12 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.fail(
location,
format!(
- "Cannot perform checked arithmetic on unequal types {:?} and {:?}",
- a, b
+ "Cannot perform checked arithmetic on unequal types {a:?} and {b:?}"
),
);
}
}
- _ => self.fail(location, format!("There is no checked version of {:?}", op)),
+ _ => self.fail(location, format!("There is no checked version of {op:?}")),
}
}
Rvalue::UnaryOp(op, operand) => {
@@ -714,7 +994,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
Rvalue::NullaryOp(NullOp::OffsetOf(fields), container) => {
- let fail_out_of_bounds = |this: &Self, location, field, ty| {
+ let fail_out_of_bounds = |this: &mut Self, location, field, ty| {
this.fail(location, format!("Out of bounds field {field:?} for {ty:?}"));
};
@@ -730,7 +1010,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
}
- ty::Adt(adt_def, substs) => {
+ ty::Adt(adt_def, args) => {
if adt_def.is_enum() {
self.fail(
location,
@@ -744,7 +1024,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
return;
};
- let f_ty = field.ty(self.tcx, substs);
+ let f_ty = field.ty(self.tcx, args);
current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
}
_ => {
@@ -824,7 +1104,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
if !ty.is_bool() {
self.fail(
location,
- format!("`assume` argument must be `bool`, but got: `{}`", ty),
+ format!("`assume` argument must be `bool`, but got: `{ty}`"),
);
}
}
@@ -837,7 +1117,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} else {
self.fail(
location,
- format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
+ format!("Expected src to be ptr in copy_nonoverlapping, got: {src_ty}"),
);
return;
};
@@ -847,19 +1127,19 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} else {
self.fail(
location,
- format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
+ format!("Expected dst to be ptr in copy_nonoverlapping, got: {dst_ty}"),
);
return;
};
// since CopyNonOverlapping is parametrized by 1 type,
// we only need to check that they are equal and not keep an extra parameter.
if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
- self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
+ self.fail(location, format!("bad arg ({op_src_ty:?} != {op_dst_ty:?})"));
}
let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
if op_cnt_ty != self.tcx.types.usize {
- self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
+ self.fail(location, format!("bad arg ({op_cnt_ty:?} != usize)"))
}
}
StatementKind::SetDiscriminant { place, .. } => {
@@ -871,8 +1151,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.fail(
location,
format!(
- "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}",
- pty
+ "`SetDiscriminant` is only allowed on ADTs and generators, not {pty:?}"
),
);
}
@@ -887,29 +1166,11 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// DropsLowered`. However, this causes ICEs with generation of drop shims, which
// seem to fail to set their `MirPhase` correctly.
if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
- self.fail(location, format!("explicit `{:?}` is forbidden", kind));
+ self.fail(location, format!("explicit `{kind:?}` is forbidden"));
}
}
- StatementKind::StorageLive(local) => {
- // We check that the local is not live when entering a `StorageLive` for it.
- // Technically, violating this restriction is only UB and not actually indicative
- // of not well-formed MIR. This means that an optimization which turns MIR that
- // already has UB into MIR that fails this check is not necessarily wrong. However,
- // we have no such optimizations at the moment, and so we include this check anyway
- // to help us catch bugs. If you happen to write an optimization that might cause
- // this to incorrectly fire, feel free to remove this check.
- if self.reachable_blocks.contains(location.block) {
- self.storage_liveness.seek_before_primary_effect(location);
- let locals_with_storage = self.storage_liveness.get();
- if locals_with_storage.contains(*local) {
- self.fail(
- location,
- format!("StorageLive({local:?}) which already has storage here"),
- );
- }
- }
- }
- StatementKind::StorageDead(_)
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
| StatementKind::Coverage(_)
| StatementKind::ConstEvalCounter
| StatementKind::PlaceMention(..)
@@ -921,9 +1182,6 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
match &terminator.kind {
- TerminatorKind::Goto { target } => {
- self.check_edge(location, *target, EdgeKind::Normal);
- }
TerminatorKind::SwitchInt { targets, discr } => {
let switch_ty = discr.ty(&self.body.local_decls, self.tcx);
@@ -937,164 +1195,49 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
other => bug!("unhandled type: {:?}", other),
});
- for (value, target) in targets.iter() {
+ for (value, _) in targets.iter() {
if Scalar::<()>::try_from_uint(value, size).is_none() {
self.fail(
location,
- format!("the value {:#x} is not a proper {:?}", value, switch_ty),
+ format!("the value {value:#x} is not a proper {switch_ty:?}"),
)
}
-
- self.check_edge(location, target, EdgeKind::Normal);
- }
- self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
-
- self.value_cache.clear();
- self.value_cache.extend(targets.iter().map(|(value, _)| value));
- let has_duplicates = targets.iter().len() != self.value_cache.len();
- if has_duplicates {
- self.fail(
- location,
- format!(
- "duplicated values in `SwitchInt` terminator: {:?}",
- terminator.kind,
- ),
- );
}
}
- TerminatorKind::Drop { target, unwind, .. } => {
- self.check_edge(location, *target, EdgeKind::Normal);
- self.check_unwind_edge(location, *unwind);
- }
- TerminatorKind::Call { func, args, destination, target, unwind, .. } => {
+ TerminatorKind::Call { func, .. } => {
let func_ty = func.ty(&self.body.local_decls, self.tcx);
match func_ty.kind() {
ty::FnPtr(..) | ty::FnDef(..) => {}
_ => self.fail(
location,
- format!("encountered non-callable type {} in `Call` terminator", func_ty),
+ format!("encountered non-callable type {func_ty} in `Call` terminator"),
),
}
- if let Some(target) = target {
- self.check_edge(location, *target, EdgeKind::Normal);
- }
- self.check_unwind_edge(location, *unwind);
-
- // The call destination place and Operand::Move place used as an argument might be
- // passed by a reference to the callee. Consequently they must be non-overlapping.
- // Currently this simply checks for duplicate places.
- self.place_cache.clear();
- self.place_cache.insert(destination.as_ref());
- let mut has_duplicates = false;
- for arg in args {
- if let Operand::Move(place) = arg {
- has_duplicates |= !self.place_cache.insert(place.as_ref());
- }
- }
-
- if has_duplicates {
- self.fail(
- location,
- format!(
- "encountered overlapping memory in `Call` terminator: {:?}",
- terminator.kind,
- ),
- );
- }
}
- TerminatorKind::Assert { cond, target, unwind, .. } => {
+ TerminatorKind::Assert { cond, .. } => {
let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
if cond_ty != self.tcx.types.bool {
self.fail(
location,
format!(
- "encountered non-boolean condition of type {} in `Assert` terminator",
- cond_ty
+ "encountered non-boolean condition of type {cond_ty} in `Assert` terminator"
),
);
}
- self.check_edge(location, *target, EdgeKind::Normal);
- self.check_unwind_edge(location, *unwind);
- }
- TerminatorKind::Yield { resume, drop, .. } => {
- if self.body.generator.is_none() {
- self.fail(location, "`Yield` cannot appear outside generator bodies");
- }
- if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
- self.fail(location, "`Yield` should have been replaced by generator lowering");
- }
- self.check_edge(location, *resume, EdgeKind::Normal);
- if let Some(drop) = drop {
- self.check_edge(location, *drop, EdgeKind::Normal);
- }
}
- TerminatorKind::FalseEdge { real_target, imaginary_target } => {
- if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
- self.fail(
- location,
- "`FalseEdge` should have been removed after drop elaboration",
- );
- }
- self.check_edge(location, *real_target, EdgeKind::Normal);
- self.check_edge(location, *imaginary_target, EdgeKind::Normal);
- }
- TerminatorKind::FalseUnwind { real_target, unwind } => {
- if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
- self.fail(
- location,
- "`FalseUnwind` should have been removed after drop elaboration",
- );
- }
- self.check_edge(location, *real_target, EdgeKind::Normal);
- self.check_unwind_edge(location, *unwind);
- }
- TerminatorKind::InlineAsm { destination, unwind, .. } => {
- if let Some(destination) = destination {
- self.check_edge(location, *destination, EdgeKind::Normal);
- }
- self.check_unwind_edge(location, *unwind);
- }
- TerminatorKind::GeneratorDrop => {
- if self.body.generator.is_none() {
- self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
- }
- if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
- self.fail(
- location,
- "`GeneratorDrop` should have been replaced by generator lowering",
- );
- }
- }
- TerminatorKind::Resume | TerminatorKind::Terminate => {
- let bb = location.block;
- if !self.body.basic_blocks[bb].is_cleanup {
- self.fail(
- location,
- "Cannot `Resume` or `Terminate` from non-cleanup basic block",
- )
- }
- }
- TerminatorKind::Return => {
- let bb = location.block;
- if self.body.basic_blocks[bb].is_cleanup {
- self.fail(location, "Cannot `Return` from cleanup basic block")
- }
- }
- TerminatorKind::Unreachable => {}
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::InlineAsm { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Resume
+ | TerminatorKind::Terminate
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable => {}
}
self.super_terminator(terminator, location);
}
-
- fn visit_source_scope(&mut self, scope: SourceScope) {
- if self.body.source_scopes.get(scope).is_none() {
- self.tcx.sess.diagnostic().delay_span_bug(
- self.body.span,
- format!(
- "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
- self.body.source.instance, self.when, scope,
- ),
- );
- }
- }
}
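
The validator is now split in two: `CfgChecker` keeps reporting eagerly through `fail`, while the type-level checks run through the new `validate_types` entry point, which returns the collected `(Location, String)` failures instead of emitting them during the walk. A minimal sketch of that collect-then-report shape, with toy stand-ins for `Location` and the body walk rather than rustc's types:

    #[derive(Debug, Clone, Copy)]
    struct Location {
        block: usize,
        statement_index: usize,
    }

    struct TypeChecker {
        failures: Vec<(Location, String)>,
    }

    impl TypeChecker {
        fn fail(&mut self, location: Location, msg: impl Into<String>) {
            // Record the problem instead of reporting it immediately.
            self.failures.push((location, msg.into()));
        }

        fn visit_body(&mut self, stmts: &[(Location, bool)]) {
            for &(loc, ok) in stmts {
                if !ok {
                    self.fail(loc, "ill-typed statement");
                }
            }
        }
    }

    fn validate_types(stmts: &[(Location, bool)]) -> Vec<(Location, String)> {
        let mut checker = TypeChecker { failures: Vec::new() };
        checker.visit_body(stmts);
        checker.failures
    }

    fn main() {
        let stmts = [(Location { block: 0, statement_index: 1 }, false)];
        for (loc, msg) in validate_types(&stmts) {
            eprintln!("{loc:?}: {msg}");
        }
    }
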
diff --git a/compiler/rustc_const_eval/src/util/compare_types.rs b/compiler/rustc_const_eval/src/util/compare_types.rs
index d6a2ffb75..83376c8e9 100644
--- a/compiler/rustc_const_eval/src/util/compare_types.rs
+++ b/compiler/rustc_const_eval/src/util/compare_types.rs
@@ -56,8 +56,16 @@ pub fn is_subtype<'tcx>(
// With `Reveal::All`, opaque types get normalized away, with `Reveal::UserFacing`
// we would get unification errors because we're unable to look into opaque types,
// even if they're constrained in our current function.
- //
- // It seems very unlikely that this hides any bugs.
- let _ = infcx.take_opaque_types();
+ for (key, ty) in infcx.take_opaque_types() {
+ let hidden_ty = tcx.type_of(key.def_id).instantiate(tcx, key.args);
+ if hidden_ty != ty.hidden_type.ty {
+ span_bug!(
+ ty.hidden_type.span,
+ "{}, {}",
+ tcx.type_of(key.def_id).instantiate(tcx, key.args),
+ ty.hidden_type.ty
+ );
+ }
+ }
errors.is_empty()
}
diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs
index 4f01e0a24..14a840ad1 100644
--- a/compiler/rustc_const_eval/src/util/type_name.rs
+++ b/compiler/rustc_const_eval/src/util/type_name.rs
@@ -4,8 +4,7 @@ use rustc_hir::definitions::DisambiguatedDefPathData;
use rustc_middle::ty::{
self,
print::{PrettyPrinter, Print, Printer},
- subst::{GenericArg, GenericArgKind},
- Ty, TyCtxt,
+ GenericArg, GenericArgKind, Ty, TyCtxt,
};
use std::fmt::Write;
@@ -56,11 +55,11 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
}
// Types with identity (print the module path).
- ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
- | ty::FnDef(def_id, substs)
- | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, substs, .. })
- | ty::Closure(def_id, substs)
- | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), args)
+ | ty::FnDef(def_id, args)
+ | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. })
+ | ty::Closure(def_id, args)
+ | ty::Generator(def_id, args, _) => self.print_def_path(def_id, args),
ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
ty::Alias(ty::Weak, _) => bug!("type_name: unexpected weak projection"),
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index a5c3cb3f8..f77bd53e7 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -35,7 +35,7 @@ elsa = "=1.7.1"
itertools = "0.10.1"
[dependencies.parking_lot]
-version = "0.11"
+version = "0.12"
[target.'cfg(windows)'.dependencies.windows]
version = "0.48.0"
diff --git a/compiler/rustc_data_structures/src/base_n.rs b/compiler/rustc_data_structures/src/base_n.rs
index 4567759c0..a3eb2b9c4 100644
--- a/compiler/rustc_data_structures/src/base_n.rs
+++ b/compiler/rustc_data_structures/src/base_n.rs
@@ -16,22 +16,24 @@ const BASE_64: &[u8; MAX_BASE] =
pub fn push_str(mut n: u128, base: usize, output: &mut String) {
debug_assert!(base >= 2 && base <= MAX_BASE);
let mut s = [0u8; 128];
- let mut index = 0;
+ let mut index = s.len();
let base = base as u128;
loop {
+ index -= 1;
s[index] = BASE_64[(n % base) as usize];
- index += 1;
n /= base;
if n == 0 {
break;
}
}
- s[0..index].reverse();
- output.push_str(str::from_utf8(&s[0..index]).unwrap());
+ output.push_str(unsafe {
+ // SAFETY: `s` is populated using only valid utf8 characters from `BASE_64`
+ str::from_utf8_unchecked(&s[index..])
+ });
}
#[inline]
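
The rewrite fills the digit buffer from the back, so the digits come out most-significant first and no `reverse` pass is needed before converting the tail slice. An equivalent standalone sketch of the same technique, using a safe `from_utf8` instead of the unchecked call in the hunk:

    const DIGITS: &[u8; 36] = b"0123456789abcdefghijklmnopqrstuvwxyz";

    // Writes `n` in the given base by filling a scratch buffer from the end.
    fn push_str(mut n: u128, base: usize, output: &mut String) {
        debug_assert!((2..=DIGITS.len()).contains(&base));
        let mut s = [0u8; 128];
        let mut index = s.len();
        let base = base as u128;
        loop {
            index -= 1;
            s[index] = DIGITS[(n % base) as usize];
            n /= base;
            if n == 0 {
                break;
            }
        }
        output.push_str(std::str::from_utf8(&s[index..]).unwrap());
    }

    fn main() {
        let mut out = String::new();
        push_str(255, 16, &mut out);
        assert_eq!(out, "ff");
    }
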
diff --git a/compiler/rustc_data_structures/src/binary_search_util/mod.rs b/compiler/rustc_data_structures/src/binary_search_util/mod.rs
index d40172a2e..bc8a6b9ea 100644
--- a/compiler/rustc_data_structures/src/binary_search_util/mod.rs
+++ b/compiler/rustc_data_structures/src/binary_search_util/mod.rs
@@ -10,41 +10,17 @@ pub fn binary_search_slice<'d, E, K>(data: &'d [E], key_fn: impl Fn(&E) -> K, ke
where
K: Ord,
{
- let Ok(mid) = data.binary_search_by_key(key, &key_fn) else {
+ let size = data.len();
+ let start = data.partition_point(|x| key_fn(x) < *key);
+ // At this point `start` either points at the first entry with equal or
+ // greater key or is equal to `size` in case all elements have smaller keys
+ if start == size || key_fn(&data[start]) != *key {
return &[];
};
- let size = data.len();
-
- // We get back *some* element with the given key -- so do
- // a galloping search backwards to find the *first* one.
- let mut start = mid;
- let mut previous = mid;
- let mut step = 1;
- loop {
- start = start.saturating_sub(step);
- if start == 0 || key_fn(&data[start]) != *key {
- break;
- }
- previous = start;
- step *= 2;
- }
- step = previous - start;
- while step > 1 {
- let half = step / 2;
- let mid = start + half;
- if key_fn(&data[mid]) != *key {
- start = mid;
- }
- step -= half;
- }
- // adjust by one if we have overshot
- if start < size && key_fn(&data[start]) != *key {
- start += 1;
- }
// Now search forward to find the *last* one.
- let mut end = mid;
- let mut previous = mid;
+ let mut end = start;
+ let mut previous = start;
let mut step = 1;
loop {
end = end.saturating_add(step).min(size);
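
`partition_point` replaces the hand-rolled galloping search: since the slice is sorted by `key_fn`, the predicate `key_fn(x) < *key` is monotone, and the first index where it becomes false is exactly the start of the run of equal keys. A standalone sketch of the idea (my own `equal_range` helper, which also finds the end of the run with a second `partition_point` instead of the forward gallop kept above):

    // Returns the subslice of `data` whose key equals `key`,
    // assuming `data` is sorted by `key_fn`.
    fn equal_range<'d, E, K: Ord>(
        data: &'d [E],
        key_fn: impl Fn(&E) -> K,
        key: &K,
    ) -> &'d [E] {
        let start = data.partition_point(|x| key_fn(x) < *key);
        if start == data.len() || key_fn(&data[start]) != *key {
            return &[];
        }
        let len = data[start..].partition_point(|x| key_fn(x) == *key);
        &data[start..start + len]
    }

    fn main() {
        let data = [(1, 'a'), (2, 'b'), (2, 'c'), (3, 'd')];
        assert_eq!(equal_range(&data, |e| e.0, &2), &[(2, 'b'), (2, 'c')][..]);
        assert!(equal_range(&data, |e| e.0, &5).is_empty());
    }
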
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index a5db14d91..85ef2de9b 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -176,9 +176,7 @@ pub fn dominators<G: ControlFlowGraph>(graph: &G) -> Dominators<G::Node> {
//
// ...this may be the case if a MirPass modifies the CFG to remove
// or rearrange certain blocks/edges.
- let Some(v) = real_to_pre_order[v] else {
- continue
- };
+ let Some(v) = real_to_pre_order[v] else { continue };
// eval returns a vertex x from which semi[x] is minimum among
// vertices semi[v] +> x *> v.
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 3deb9c5c2..337720897 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -37,6 +37,7 @@
#![allow(rustc::potential_query_instability)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#![deny(unsafe_op_in_unsafe_fn)]
#[macro_use]
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index 9409057d4..60b343afb 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -49,12 +49,11 @@ impl<K: Ord, V> SortedMap<K, V> {
}
#[inline]
- pub fn insert(&mut self, key: K, mut value: V) -> Option<V> {
+ pub fn insert(&mut self, key: K, value: V) -> Option<V> {
match self.lookup_index_for(&key) {
Ok(index) => {
let slot = unsafe { self.data.get_unchecked_mut(index) };
- mem::swap(&mut slot.1, &mut value);
- Some(value)
+ Some(mem::replace(&mut slot.1, value))
}
Err(index) => {
self.data.insert(index, (key, value));
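
The `insert` change is a small idiom swap: `mem::replace` performs the "store the new value, hand back the old one" step in a single expression, so the `mut value` binding and the explicit `mem::swap` are no longer needed. A minimal illustration:

    use std::mem;

    fn main() {
        let mut slot = ("key", 1);
        // Equivalent to the mem::swap-then-return dance, in one call:
        let old = mem::replace(&mut slot.1, 2);
        assert_eq!(old, 1);
        assert_eq!(slot, ("key", 2));
    }
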
diff --git a/compiler/rustc_data_structures/src/sso/map.rs b/compiler/rustc_data_structures/src/sso/map.rs
index 99581ed23..04e359a54 100644
--- a/compiler/rustc_data_structures/src/sso/map.rs
+++ b/compiler/rustc_data_structures/src/sso/map.rs
@@ -268,11 +268,7 @@ impl<K: Eq + Hash, V> SsoHashMap<K, V> {
pub fn remove_entry(&mut self, key: &K) -> Option<(K, V)> {
match self {
SsoHashMap::Array(array) => {
- if let Some(index) = array.iter().position(|(k, _v)| k == key) {
- Some(array.swap_remove(index))
- } else {
- None
- }
+ array.iter().position(|(k, _v)| k == key).map(|index| array.swap_remove(index))
}
SsoHashMap::Map(map) => map.remove_entry(key),
}
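
The `remove_entry` change is a pure refactor: the `if let Some(index) ... else None` collapses into `Option::map` over `position`. A self-contained example of the same shape (a hypothetical `remove_pair` over a plain `Vec`, not the SSO array used above):

    // Find the index of the matching key, then `swap_remove` it, all
    // inside `Option::map`.
    fn remove_pair<K: Eq, V>(array: &mut Vec<(K, V)>, key: &K) -> Option<(K, V)> {
        array.iter().position(|(k, _)| k == key).map(|index| array.swap_remove(index))
    }

    fn main() {
        let mut v = vec![("a", 1), ("b", 2)];
        assert_eq!(remove_pair(&mut v, &"a"), Some(("a", 1)));
        assert_eq!(remove_pair(&mut v, &"z"), None);
    }
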
diff --git a/compiler/rustc_data_structures/src/sync/vec.rs b/compiler/rustc_data_structures/src/sync/vec.rs
index e36dded9e..314496ce9 100644
--- a/compiler/rustc_data_structures/src/sync/vec.rs
+++ b/compiler/rustc_data_structures/src/sync/vec.rs
@@ -43,37 +43,23 @@ impl<I: Idx, T: Copy> AppendOnlyIndexVec<I, T> {
#[derive(Default)]
pub struct AppendOnlyVec<T: Copy> {
- #[cfg(not(parallel_compiler))]
- vec: elsa::vec::FrozenVec<T>,
- #[cfg(parallel_compiler)]
- vec: elsa::sync::LockFreeFrozenVec<T>,
+ vec: parking_lot::RwLock<Vec<T>>,
}
impl<T: Copy> AppendOnlyVec<T> {
pub fn new() -> Self {
- Self {
- #[cfg(not(parallel_compiler))]
- vec: elsa::vec::FrozenVec::new(),
- #[cfg(parallel_compiler)]
- vec: elsa::sync::LockFreeFrozenVec::new(),
- }
+ Self { vec: Default::default() }
}
pub fn push(&self, val: T) -> usize {
- #[cfg(not(parallel_compiler))]
- let i = self.vec.len();
- #[cfg(not(parallel_compiler))]
- self.vec.push(val);
- #[cfg(parallel_compiler)]
- let i = self.vec.push(val);
- i
+ let mut v = self.vec.write();
+ let n = v.len();
+ v.push(val);
+ n
}
pub fn get(&self, i: usize) -> Option<T> {
- #[cfg(not(parallel_compiler))]
- return self.vec.get_copy(i);
- #[cfg(parallel_compiler)]
- return self.vec.get(i);
+ self.vec.read().get(i).copied()
}
pub fn iter_enumerated(&self) -> impl Iterator<Item = (usize, T)> + '_ {
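
The elsa-based frozen vectors are replaced by a plain `parking_lot::RwLock<Vec<T>>`: `push` takes the write lock and returns the index of the new element, `get` takes a read lock and copies the element out. A standalone sketch of the same design using `std::sync::RwLock`, to stay dependency-free here:

    use std::sync::RwLock;

    // Simplified stand-in for the reworked AppendOnlyVec: interior
    // mutability through an RwLock, so `push` and `get` only need `&self`.
    #[derive(Default)]
    struct AppendOnlyVec<T: Copy> {
        vec: RwLock<Vec<T>>,
    }

    impl<T: Copy> AppendOnlyVec<T> {
        fn push(&self, val: T) -> usize {
            let mut v = self.vec.write().unwrap();
            let n = v.len();
            v.push(val);
            n
        }

        fn get(&self, i: usize) -> Option<T> {
            self.vec.read().unwrap().get(i).copied()
        }
    }

    fn main() {
        let v = AppendOnlyVec::<u32>::default();
        assert_eq!(v.push(7), 0);
        assert_eq!(v.get(0), Some(7));
        assert_eq!(v.get(1), None);
    }
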
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index d61bb55be..8c84daf4f 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -116,7 +116,7 @@ pub struct WorkerLocal<T> {
// This is safe because the `deref` call will return a reference to a `T` unique to each thread
// or it will panic for threads without an associated local. So there isn't a need for `T` to do
-// it's own synchronization. The `verify` method on `RegistryId` has an issue where the the id
+// it's own synchronization. The `verify` method on `RegistryId` has an issue where the id
// can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
#[cfg(parallel_compiler)]
unsafe impl<T: Send> Sync for WorkerLocal<T> {}
diff --git a/compiler/rustc_data_structures/src/unord.rs b/compiler/rustc_data_structures/src/unord.rs
index 2b21815b6..47c56eba7 100644
--- a/compiler/rustc_data_structures/src/unord.rs
+++ b/compiler/rustc_data_structures/src/unord.rs
@@ -31,6 +31,7 @@ use crate::{
///
/// It's still possible to do the same thing with an `Fn` by using interior mutability,
/// but the chance of doing it accidentally is reduced.
+#[derive(Clone)]
pub struct UnordItems<T, I: Iterator<Item = T>>(I);
impl<T, I: Iterator<Item = T>> UnordItems<T, I> {
@@ -167,6 +168,14 @@ impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
}
}
+/// A marker trait specifying that `Self` can consume `UnordItems<_>` without
+/// exposing any internal ordering.
+///
+/// Note: right now this is just a marker trait. It could be extended to contain
+/// some useful, common methods though, like `len`, `clear`, or the various
+/// kinds of `to_sorted`.
+trait UnordCollection {}
+
/// This is a set collection type that tries very hard to not expose
/// any internal iteration. This is a useful property when trying to
/// uphold the determinism invariants imposed by the query system.
@@ -181,6 +190,8 @@ pub struct UnordSet<V: Eq + Hash> {
inner: FxHashSet<V>,
}
+impl<V: Eq + Hash> UnordCollection for UnordSet<V> {}
+
impl<V: Eq + Hash> Default for UnordSet<V> {
#[inline]
fn default() -> Self {
@@ -195,6 +206,11 @@ impl<V: Eq + Hash> UnordSet<V> {
}
#[inline]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self { inner: FxHashSet::with_capacity_and_hasher(capacity, Default::default()) }
+ }
+
+ #[inline]
pub fn len(&self) -> usize {
self.inner.len()
}
@@ -258,9 +274,9 @@ impl<V: Eq + Hash> UnordSet<V> {
#[inline]
pub fn to_sorted_stable_ord(&self) -> Vec<V>
where
- V: Ord + StableOrd + Copy,
+ V: Ord + StableOrd + Clone,
{
- let mut items: Vec<V> = self.inner.iter().copied().collect();
+ let mut items: Vec<V> = self.inner.iter().cloned().collect();
items.sort_unstable();
items
}
@@ -279,16 +295,28 @@ impl<V: Eq + Hash> UnordSet<V> {
to_sorted_vec(hcx, self.inner.into_iter(), cache_sort_key, |x| x)
}
- // We can safely extend this UnordSet from a set of unordered values because that
- // won't expose the internal ordering anywhere.
#[inline]
- pub fn extend_unord<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
- self.inner.extend(items.0)
+ pub fn clear(&mut self) {
+ self.inner.clear();
}
+}
+
+pub trait ExtendUnord<T> {
+ /// Extend this unord collection with the given `UnordItems`.
+ /// This method is called `extend_unord` instead of just `extend` so it
+ /// does not conflict with `Extend::extend`. Otherwise there would be many
+ /// places where the two methods would have to be explicitly disambiguated
+ /// via UFCS.
+ fn extend_unord<I: Iterator<Item = T>>(&mut self, items: UnordItems<T, I>);
+}
+// Note: it is important that `C` implements `UnordCollection` in addition to
+// `Extend`, otherwise this impl would leak the internal iteration order of
+// `items`, e.g. when calling `some_vec.extend_unord(some_unord_items)`.
+impl<C: Extend<T> + UnordCollection, T> ExtendUnord<T> for C {
#[inline]
- pub fn clear(&mut self) {
- self.inner.clear();
+ fn extend_unord<I: Iterator<Item = T>>(&mut self, items: UnordItems<T, I>) {
+ self.extend(items.0)
}
}
@@ -312,6 +340,12 @@ impl<V: Hash + Eq> From<FxHashSet<V>> for UnordSet<V> {
}
}
+impl<V: Hash + Eq, I: Iterator<Item = V>> From<UnordItems<V, I>> for UnordSet<V> {
+ fn from(value: UnordItems<V, I>) -> Self {
+ UnordSet { inner: FxHashSet::from_iter(value.0) }
+ }
+}
+
impl<HCX, V: Hash + Eq + HashStable<HCX>> HashStable<HCX> for UnordSet<V> {
#[inline]
fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
@@ -333,6 +367,8 @@ pub struct UnordMap<K: Eq + Hash, V> {
inner: FxHashMap<K, V>,
}
+impl<K: Eq + Hash, V> UnordCollection for UnordMap<K, V> {}
+
impl<K: Eq + Hash, V> Default for UnordMap<K, V> {
#[inline]
fn default() -> Self {
@@ -363,6 +399,11 @@ impl<K: Hash + Eq, V, I: Iterator<Item = (K, V)>> From<UnordItems<(K, V), I>> fo
impl<K: Eq + Hash, V> UnordMap<K, V> {
#[inline]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self { inner: FxHashMap::with_capacity_and_hasher(capacity, Default::default()) }
+ }
+
+ #[inline]
pub fn len(&self) -> usize {
self.inner.len()
}
@@ -428,13 +469,6 @@ impl<K: Eq + Hash, V> UnordMap<K, V> {
UnordItems(self.inner.into_iter())
}
- // We can safely extend this UnordMap from a set of unordered values because that
- // won't expose the internal ordering anywhere.
- #[inline]
- pub fn extend<I: Iterator<Item = (K, V)>>(&mut self, items: UnordItems<(K, V), I>) {
- self.inner.extend(items.0)
- }
-
/// Returns the entries of this map in stable sort order (as defined by `ToStableHashKey`).
///
/// The `cache_sort_key` parameter controls if [slice::sort_by_cached_key] or
@@ -554,15 +588,10 @@ impl<V> UnordBag<V> {
pub fn into_items(self) -> UnordItems<V, impl Iterator<Item = V>> {
UnordItems(self.inner.into_iter())
}
-
- // We can safely extend this UnordSet from a set of unordered values because that
- // won't expose the internal ordering anywhere.
- #[inline]
- pub fn extend<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
- self.inner.extend(items.0)
- }
}
+impl<T> UnordCollection for UnordBag<T> {}
+
impl<T> Extend<T> for UnordBag<T> {
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
self.inner.extend(iter)
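The `ExtendUnord` blanket impl above hinges on the `UnordCollection` marker bound. A self-contained sketch of that gate (simplified types, not the rustc definitions):

    use std::collections::HashSet;

    struct UnordItems<I>(I);

    trait UnordCollection {}
    impl<T: std::hash::Hash + Eq> UnordCollection for HashSet<T> {}

    trait ExtendUnord<T> {
        fn extend_unord<I: Iterator<Item = T>>(&mut self, items: UnordItems<I>);
    }

    // The marker bound keeps order-revealing collections (e.g. Vec) from
    // picking up `extend_unord` through the blanket impl.
    impl<C: Extend<T> + UnordCollection, T> ExtendUnord<T> for C {
        fn extend_unord<I: Iterator<Item = T>>(&mut self, items: UnordItems<I>) {
            self.extend(items.0)
        }
    }

    fn main() {
        let mut set: HashSet<u32> = HashSet::new();
        set.extend_unord(UnordItems([3u32, 1, 2].into_iter()));
        assert_eq!(set.len(), 3);
        // A Vec would be rejected here: it implements Extend but not UnordCollection.
    }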
diff --git a/compiler/rustc_driver/Cargo.toml b/compiler/rustc_driver/Cargo.toml
index 86a54f6be..d7c295418 100644
--- a/compiler/rustc_driver/Cargo.toml
+++ b/compiler/rustc_driver/Cargo.toml
@@ -8,6 +8,3 @@ crate-type = ["dylib"]
[dependencies]
rustc_driver_impl = { path = "../rustc_driver_impl" }
-# FIXME(Nilstrieb): 0.37.12 adds eventfd support for FreeBSD,
-# but FreeBSD 12 does not support it: https://github.com/bytecodealliance/rustix/issues/716
-rustix = "=0.37.11"
diff --git a/compiler/rustc_driver_impl/Cargo.toml b/compiler/rustc_driver_impl/Cargo.toml
index 67352c55c..a7b01618a 100644
--- a/compiler/rustc_driver_impl/Cargo.toml
+++ b/compiler/rustc_driver_impl/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2021"
[lib]
[dependencies]
+time = { version = "0.3", default-features = false, features = ["formatting", ] }
tracing = { version = "0.1.35" }
serde_json = "1.0.59"
rustc_log = { path = "../rustc_log" }
diff --git a/compiler/rustc_driver_impl/messages.ftl b/compiler/rustc_driver_impl/messages.ftl
index 22b4ec6b0..d3bd3244a 100644
--- a/compiler/rustc_driver_impl/messages.ftl
+++ b/compiler/rustc_driver_impl/messages.ftl
@@ -3,7 +3,11 @@ driver_impl_ice_bug_report = we would appreciate a bug report: {$bug_report_url}
driver_impl_ice_exclude_cargo_defaults = some of the compiler flags provided by cargo are hidden
driver_impl_ice_flags = compiler flags: {$flags}
+driver_impl_ice_path = please attach the file at `{$path}` to your bug report
+driver_impl_ice_path_error = the ICE couldn't be written to `{$path}`: {$error}
+driver_impl_ice_path_error_env = the environment variable `RUSTC_ICE` is set to `{$env_var}`
driver_impl_ice_version = rustc {$version} running on {$triple}
+
driver_impl_rlink_empty_version_number = The input does not contain version number
driver_impl_rlink_encoding_version_mismatch = .rlink file was produced with encoding version `{$version_array}`, but the current version is `{$rlink_version}`
@@ -15,5 +19,3 @@ driver_impl_rlink_rustc_version_mismatch = .rlink file was produced by rustc ver
driver_impl_rlink_unable_to_read = failed to read rlink file: `{$err}`
driver_impl_rlink_wrong_file_type = The input does not look like a .rlink file
-
-driver_impl_unpretty_dump_fail = pretty-print failed to write `{$path}` due to error `{$err}`
diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs
index 9352fe314..736877bde 100644
--- a/compiler/rustc_driver_impl/src/lib.rs
+++ b/compiler/rustc_driver_impl/src/lib.rs
@@ -7,6 +7,8 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(lazy_cell)]
#![feature(decl_macro)]
+#![feature(panic_update_hook)]
+#![feature(let_chains)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
#![deny(rustc::untranslatable_diagnostic)]
@@ -25,19 +27,15 @@ use rustc_data_structures::profiling::{
use rustc_data_structures::sync::SeqCst;
use rustc_errors::registry::{InvalidErrorCode, Registry};
use rustc_errors::{markdown, ColorConfig};
-use rustc_errors::{
- DiagnosticMessage, ErrorGuaranteed, Handler, PResult, SubdiagnosticMessage, TerminalUrl,
-};
+use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, PResult, SubdiagnosticMessage};
use rustc_feature::find_gated_cfg;
use rustc_fluent_macro::fluent_messages;
use rustc_interface::util::{self, collect_crate_types, get_codegen_backend};
use rustc_interface::{interface, Queries};
-use rustc_lint::LintStore;
+use rustc_lint::{unerased_lint_store, LintStore};
use rustc_metadata::locator;
use rustc_session::config::{nightly_options, CG_OPTIONS, Z_OPTIONS};
-use rustc_session::config::{
- ErrorOutputType, Input, OutFileName, OutputType, PrintRequest, TrimmedDefPaths,
-};
+use rustc_session::config::{ErrorOutputType, Input, OutFileName, OutputType, TrimmedDefPaths};
use rustc_session::cstore::MetadataLoader;
use rustc_session::getopts::{self, Matches};
use rustc_session::lint::{Lint, LintId};
@@ -51,14 +49,18 @@ use std::cmp::max;
use std::collections::BTreeMap;
use std::env;
use std::ffi::OsString;
-use std::fs;
+use std::fmt::Write as _;
+use std::fs::{self, File};
use std::io::{self, IsTerminal, Read, Write};
-use std::panic::{self, catch_unwind};
+use std::panic::{self, catch_unwind, PanicInfo};
use std::path::PathBuf;
use std::process::{self, Command, Stdio};
use std::str;
+use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::OnceLock;
-use std::time::Instant;
+use std::time::{Instant, SystemTime};
+use time::format_description::well_known::Rfc3339;
+use time::OffsetDateTime;
#[allow(unused_macros)]
macro do_not_use_print($($t:tt)*) {
@@ -67,6 +69,11 @@ macro do_not_use_print($($t:tt)*) {
)
}
+#[allow(unused_macros)]
+macro do_not_use_safe_print($($t:tt)*) {
+ std::compile_error!("Don't use `safe_print` or `safe_println` here, use `println_info` instead")
+}
+
// This import blocks the use of panicking `print` and `println` in all the code
// below. Please use `safe_print` and `safe_println` to avoid ICE when
// encountering an I/O error during print.
@@ -279,9 +286,6 @@ fn run_compiler(
let sopts = config::build_session_options(&mut early_error_handler, &matches);
- // Set parallel mode before thread pool creation, which will create `Lock`s.
- interface::set_thread_safe_mode(&sopts.unstable_opts);
-
if let Some(ref code) = matches.opt_str("explain") {
handle_explain(&early_error_handler, diagnostics_registry(), code, sopts.color);
return Ok(());
@@ -297,6 +301,7 @@ fn run_compiler(
input: Input::File(PathBuf::new()),
output_file: ofile,
output_dir: odir,
+ ice_file: ice_path().clone(),
file_loader,
locale_resources: DEFAULT_LOCALE_RESOURCES,
lint_caps: Default::default(),
@@ -386,6 +391,10 @@ fn run_compiler(
pretty::print_after_hir_lowering(tcx, *ppm);
Ok(())
})?;
+
+ // Make sure the `output_filenames` query is run for its side
+ // effects of writing the dep-info and reporting errors.
+ queries.global_ctxt()?.enter(|tcx| tcx.output_filenames(()));
} else {
let krate = queries.parse()?.steal();
pretty::print_after_parsing(sess, &krate, *ppm);
@@ -402,15 +411,11 @@ fn run_compiler(
return early_exit();
}
- {
- let plugins = queries.register_plugins()?;
- let (.., lint_store) = &*plugins.borrow();
-
- // Lint plugins are registered; now we can process command line flags.
- if sess.opts.describe_lints {
- describe_lints(sess, lint_store, true);
- return early_exit();
- }
+ if sess.opts.describe_lints {
+ queries
+ .global_ctxt()?
+ .enter(|tcx| describe_lints(sess, unerased_lint_store(tcx), true));
+ return early_exit();
}
// Make sure name resolution and macro expansion is run.
@@ -648,8 +653,6 @@ fn show_md_content_with_pager(content: &str, color: ColorConfig) {
pub fn try_process_rlink(sess: &Session, compiler: &interface::Compiler) -> Compilation {
if sess.opts.unstable_opts.link_only {
if let Input::File(file) = &sess.io.input {
- // FIXME: #![crate_type] and #![crate_name] support not implemented yet
- sess.init_crate_types(collect_crate_types(sess, &[]));
let outputs = compiler.build_output_filenames(sess, &[]);
let rlink_data = fs::read(file).unwrap_or_else(|err| {
sess.emit_fatal(RlinkUnableToRead { err });
@@ -717,10 +720,17 @@ fn print_crate_info(
sess: &Session,
parse_attrs: bool,
) -> Compilation {
- use rustc_session::config::PrintRequest::*;
+ use rustc_session::config::PrintKind::*;
+
+ // This import prevents the following code from using the printing macros
+ // used by the rest of the module. Within this function, we only write to
+ // the output specified by `sess.io.output_file`.
+ #[allow(unused_imports)]
+ use {do_not_use_safe_print as safe_print, do_not_use_safe_print as safe_println};
+
// NativeStaticLibs and LinkArgs are special - printed during linking
// (empty iterator returns true)
- if sess.opts.prints.iter().all(|&p| p == NativeStaticLibs || p == LinkArgs) {
+ if sess.opts.prints.iter().all(|p| p.kind == NativeStaticLibs || p.kind == LinkArgs) {
return Compilation::Continue;
}
@@ -736,17 +746,23 @@ fn print_crate_info(
} else {
None
};
+
for req in &sess.opts.prints {
- match *req {
+ let mut crate_info = String::new();
+ macro println_info($($arg:tt)*) {
+ crate_info.write_fmt(format_args!("{}\n", format_args!($($arg)*))).unwrap()
+ }
+
+ match req.kind {
TargetList => {
let mut targets = rustc_target::spec::TARGETS.to_vec();
targets.sort_unstable();
- safe_println!("{}", targets.join("\n"));
+ println_info!("{}", targets.join("\n"));
}
- Sysroot => safe_println!("{}", sess.sysroot.display()),
- TargetLibdir => safe_println!("{}", sess.target_tlib_path.dir.display()),
+ Sysroot => println_info!("{}", sess.sysroot.display()),
+ TargetLibdir => println_info!("{}", sess.target_tlib_path.dir.display()),
TargetSpec => {
- safe_println!("{}", serde_json::to_string_pretty(&sess.target.to_json()).unwrap());
+ println_info!("{}", serde_json::to_string_pretty(&sess.target.to_json()).unwrap());
}
AllTargetSpecs => {
let mut targets = BTreeMap::new();
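A stable-Rust sketch of the buffer-then-write pattern `print_crate_info` switches to above (using `macro_rules!` in place of the nightly `macro` item; names are illustrative):

    use std::fmt::Write as _;

    fn main() {
        let mut crate_info = String::new();
        // The macro captures `crate_info` from the enclosing scope, so every
        // "print" becomes an append to the buffer.
        macro_rules! println_info {
            ($($arg:tt)*) => {
                crate_info.write_fmt(format_args!("{}\n", format_args!($($arg)*))).unwrap()
            };
        }

        println_info!("host: {}", "x86_64-unknown-linux-gnu");
        println_info!("sysroot: {}", "/usr");

        // One final write to the real destination instead of many small ones.
        print!("{crate_info}");
    }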
@@ -755,26 +771,30 @@ fn print_crate_info(
let target = Target::expect_builtin(&triple);
targets.insert(name, target.to_json());
}
- safe_println!("{}", serde_json::to_string_pretty(&targets).unwrap());
+ println_info!("{}", serde_json::to_string_pretty(&targets).unwrap());
}
- FileNames | CrateName => {
+ FileNames => {
let Some(attrs) = attrs.as_ref() else {
// no crate attributes, print out an error and exit
return Compilation::Continue;
};
let t_outputs = rustc_interface::util::build_output_filenames(attrs, sess);
let id = rustc_session::output::find_crate_name(sess, attrs);
- if *req == PrintRequest::CrateName {
- safe_println!("{id}");
- continue;
- }
let crate_types = collect_crate_types(sess, attrs);
for &style in &crate_types {
let fname =
rustc_session::output::filename_for_input(sess, style, id, &t_outputs);
- safe_println!("{}", fname.as_path().file_name().unwrap().to_string_lossy());
+ println_info!("{}", fname.as_path().file_name().unwrap().to_string_lossy());
}
}
+ CrateName => {
+ let Some(attrs) = attrs.as_ref() else {
+ // no crate attributes, print out an error and exit
+ return Compilation::Continue;
+ };
+ let id = rustc_session::output::find_crate_name(sess, attrs);
+ println_info!("{id}");
+ }
Cfg => {
let mut cfgs = sess
.parse_sess
@@ -806,13 +826,13 @@ fn print_crate_info(
cfgs.sort();
for cfg in cfgs {
- safe_println!("{cfg}");
+ println_info!("{cfg}");
}
}
CallingConventions => {
let mut calling_conventions = rustc_target::spec::abi::all_names();
calling_conventions.sort_unstable();
- safe_println!("{}", calling_conventions.join("\n"));
+ println_info!("{}", calling_conventions.join("\n"));
}
RelocationModels
| CodeModels
@@ -820,7 +840,7 @@ fn print_crate_info(
| TargetCPUs
| StackProtectorStrategies
| TargetFeatures => {
- codegen_backend.print(*req, sess);
+ codegen_backend.print(req, &mut crate_info, sess);
}
// Any output here interferes with Cargo's parsing of other printed output
NativeStaticLibs => {}
@@ -830,7 +850,7 @@ fn print_crate_info(
for split in &[Off, Packed, Unpacked] {
if sess.target.options.supported_split_debuginfo.contains(split) {
- safe_println!("{split}");
+ println_info!("{split}");
}
}
}
@@ -838,7 +858,7 @@ fn print_crate_info(
use rustc_target::spec::current_apple_deployment_target;
if sess.target.is_like_osx {
- safe_println!(
+ println_info!(
"deployment_target={}",
current_apple_deployment_target(&sess.target)
.expect("unknown Apple target OS")
@@ -849,6 +869,8 @@ fn print_crate_info(
}
}
}
+
+ req.out.overwrite(&crate_info, sess);
}
Compilation::Stop
}
@@ -1295,9 +1317,29 @@ pub fn catch_with_exit_code(f: impl FnOnce() -> interface::Result<()>) -> i32 {
}
}
-/// Stores the default panic hook, from before [`install_ice_hook`] was called.
-static DEFAULT_HOOK: OnceLock<Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static>> =
- OnceLock::new();
+pub static ICE_PATH: OnceLock<Option<PathBuf>> = OnceLock::new();
+
+pub fn ice_path() -> &'static Option<PathBuf> {
+ ICE_PATH.get_or_init(|| {
+ if !rustc_feature::UnstableFeatures::from_environment(None).is_nightly_build() {
+ return None;
+ }
+ if let Ok("0") = std::env::var("RUST_BACKTRACE").as_deref() {
+ return None;
+ }
+ let mut path = match std::env::var("RUSTC_ICE").as_deref() {
+ // Explicitly opting out of writing ICEs to disk.
+ Ok("0") => return None,
+ Ok(s) => PathBuf::from(s),
+ Err(_) => std::env::current_dir().unwrap_or_default(),
+ };
+ let now: OffsetDateTime = SystemTime::now().into();
+ let file_now = now.format(&Rfc3339).unwrap_or(String::new());
+ let pid = std::process::id();
+ path.push(format!("rustc-ice-{file_now}-{pid}.txt"));
+ Some(path)
+ })
+}
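How the ICE file name is assembled, as a standalone sketch (assumes the `time` 0.3 crate with the "formatting" feature, matching the Cargo.toml change above; not rustc code):

    use std::path::PathBuf;
    use std::time::SystemTime;
    use time::format_description::well_known::Rfc3339;
    use time::OffsetDateTime;

    fn ice_file_name(mut dir: PathBuf) -> PathBuf {
        let now: OffsetDateTime = SystemTime::now().into();
        // An RFC 3339 timestamp keeps the dump files sortable by creation time.
        let stamp = now.format(&Rfc3339).unwrap_or_default();
        dir.push(format!("rustc-ice-{stamp}-{}.txt", std::process::id()));
        dir
    }

    fn main() {
        let path = ice_file_name(std::env::current_dir().unwrap_or_default());
        println!("{}", path.display());
    }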
/// Installs a panic hook that will print the ICE message on unexpected panics.
///
@@ -1321,33 +1363,59 @@ pub fn install_ice_hook(bug_report_url: &'static str, extra_info: fn(&Handler))
std::env::set_var("RUST_BACKTRACE", "full");
}
- let default_hook = DEFAULT_HOOK.get_or_init(panic::take_hook);
-
- panic::set_hook(Box::new(move |info| {
- // If the error was caused by a broken pipe then this is not a bug.
- // Write the error and return immediately. See #98700.
- #[cfg(windows)]
- if let Some(msg) = info.payload().downcast_ref::<String>() {
- if msg.starts_with("failed printing to stdout: ") && msg.ends_with("(os error 232)") {
- // the error code is already going to be reported when the panic unwinds up the stack
- let handler = EarlyErrorHandler::new(ErrorOutputType::default());
- let _ = handler.early_error_no_abort(msg.clone());
- return;
- }
- };
-
- // Invoke the default handler, which prints the actual panic message and optionally a backtrace
- // Don't do this for delayed bugs, which already emit their own more useful backtrace.
- if !info.payload().is::<rustc_errors::DelayedBugPanic>() {
- (*default_hook)(info);
+ panic::update_hook(Box::new(
+ move |default_hook: &(dyn Fn(&PanicInfo<'_>) + Send + Sync + 'static),
+ info: &PanicInfo<'_>| {
+ // If the error was caused by a broken pipe then this is not a bug.
+ // Write the error and return immediately. See #98700.
+ #[cfg(windows)]
+ if let Some(msg) = info.payload().downcast_ref::<String>() {
+ if msg.starts_with("failed printing to stdout: ") && msg.ends_with("(os error 232)")
+ {
+ // the error code is already going to be reported when the panic unwinds up the stack
+ let handler = EarlyErrorHandler::new(ErrorOutputType::default());
+ let _ = handler.early_error_no_abort(msg.clone());
+ return;
+ }
+ };
- // Separate the output with an empty line
- eprintln!();
- }
+ // Invoke the default handler, which prints the actual panic message and optionally a backtrace
+ // Don't do this for delayed bugs, which already emit their own more useful backtrace.
+ if !info.payload().is::<rustc_errors::DelayedBugPanic>() {
+ default_hook(info);
+ // Separate the output with an empty line
+ eprintln!();
+
+ if let Some(ice_path) = ice_path()
+ && let Ok(mut out) =
+ File::options().create(true).append(true).open(&ice_path)
+ {
+ // The current implementation always returns `Some`.
+ let location = info.location().unwrap();
+ let msg = match info.payload().downcast_ref::<&'static str>() {
+ Some(s) => *s,
+ None => match info.payload().downcast_ref::<String>() {
+ Some(s) => &s[..],
+ None => "Box<dyn Any>",
+ },
+ };
+ let thread = std::thread::current();
+ let name = thread.name().unwrap_or("<unnamed>");
+ let _ = write!(
+ &mut out,
+ "thread '{name}' panicked at {location}:\n\
+ {msg}\n\
+ stack backtrace:\n\
+ {:#}",
+ std::backtrace::Backtrace::force_capture()
+ );
+ }
+ }
- // Print the ICE message
- report_ice(info, bug_report_url, extra_info);
- }));
+ // Print the ICE message
+ report_ice(info, bug_report_url, extra_info);
+ },
+ ));
}
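A minimal sketch of `std::panic::update_hook` on its own (nightly-only; needs `#![feature(panic_update_hook)]`): unlike `set_hook`, the closure receives the previously installed hook, so the default panic message still prints before any extra reporting.

    #![feature(panic_update_hook)]

    use std::panic::{self, PanicInfo};

    fn main() {
        panic::update_hook(Box::new(
            move |default_hook: &(dyn Fn(&PanicInfo<'_>) + Send + Sync + 'static),
                  info: &PanicInfo<'_>| {
                // Keep the standard message, then add custom reporting.
                default_hook(info);
                eprintln!("extra reporting (e.g. an ICE dump) would go here");
            },
        ));

        let _ = panic::catch_unwind(|| {
            panic!("boom");
        });
    }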
/// Prints the ICE message, including query stack, but without backtrace.
@@ -1361,17 +1429,9 @@ pub fn report_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str, extra_info:
rustc_errors::fallback_fluent_bundle(crate::DEFAULT_LOCALE_RESOURCES.to_vec(), false);
let emitter = Box::new(rustc_errors::emitter::EmitterWriter::stderr(
rustc_errors::ColorConfig::Auto,
- None,
- None,
fallback_bundle,
- false,
- false,
- None,
- false,
- false,
- TerminalUrl::No,
));
- let handler = rustc_errors::Handler::with_emitter(true, None, emitter);
+ let handler = rustc_errors::Handler::with_emitter(emitter);
// a .span_bug or .bug call has already printed what
// it wants to print.
@@ -1382,10 +1442,40 @@ pub fn report_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str, extra_info:
}
handler.emit_note(session_diagnostics::IceBugReport { bug_report_url });
- handler.emit_note(session_diagnostics::IceVersion {
- version: util::version_str!().unwrap_or("unknown_version"),
- triple: config::host_triple(),
- });
+
+ let version = util::version_str!().unwrap_or("unknown_version");
+ let triple = config::host_triple();
+
+ static FIRST_PANIC: AtomicBool = AtomicBool::new(true);
+
+ let file = if let Some(path) = ice_path().as_ref() {
+ // Create the ICE dump target file.
+ match crate::fs::File::options().create(true).append(true).open(&path) {
+ Ok(mut file) => {
+ handler
+ .emit_note(session_diagnostics::IcePath { path: path.display().to_string() });
+ if FIRST_PANIC.swap(false, Ordering::SeqCst) {
+ let _ = write!(file, "\n\nrustc version: {version}\nplatform: {triple}");
+ }
+ Some(file)
+ }
+ Err(err) => {
+ // The ICE couldn't be written to the target path; provide feedback to the user as to why.
+ handler.emit_warning(session_diagnostics::IcePathError {
+ path: path.display().to_string(),
+ error: err.to_string(),
+ env_var: std::env::var("RUSTC_ICE")
+ .ok()
+ .map(|env_var| session_diagnostics::IcePathErrorEnv { env_var }),
+ });
+ handler.emit_note(session_diagnostics::IceVersion { version, triple });
+ None
+ }
+ }
+ } else {
+ handler.emit_note(session_diagnostics::IceVersion { version, triple });
+ None
+ };
if let Some((flags, excluded_cargo_defaults)) = extra_compiler_flags() {
handler.emit_note(session_diagnostics::IceFlags { flags: flags.join(" ") });
@@ -1399,7 +1489,7 @@ pub fn report_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str, extra_info:
let num_frames = if backtrace { None } else { Some(2) };
- interface::try_print_query_stack(&handler, num_frames);
+ interface::try_print_query_stack(&handler, num_frames, file);
// We don't trust this callback not to panic itself, so run it at the end after we're sure we've
// printed all the relevant info.
@@ -1453,13 +1543,13 @@ mod signal_handler {
/// When an error signal (such as SIGABRT or SIGSEGV) is delivered to the
/// process, print a stack trace and then exit.
pub(super) fn install() {
+ use std::alloc::{alloc, Layout};
+
unsafe {
- const ALT_STACK_SIZE: usize = libc::MINSIGSTKSZ + 64 * 1024;
+ let alt_stack_size: usize = min_sigstack_size() + 64 * 1024;
let mut alt_stack: libc::stack_t = std::mem::zeroed();
- alt_stack.ss_sp =
- std::alloc::alloc(std::alloc::Layout::from_size_align(ALT_STACK_SIZE, 1).unwrap())
- as *mut libc::c_void;
- alt_stack.ss_size = ALT_STACK_SIZE;
+ alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast();
+ alt_stack.ss_size = alt_stack_size;
libc::sigaltstack(&alt_stack, std::ptr::null_mut());
let mut sa: libc::sigaction = std::mem::zeroed();
@@ -1469,6 +1559,23 @@ mod signal_handler {
libc::sigaction(libc::SIGSEGV, &sa, std::ptr::null_mut());
}
}
+
+ /// Modern kernels on modern hardware can have dynamic signal stack sizes.
+ #[cfg(any(target_os = "linux", target_os = "android"))]
+ fn min_sigstack_size() -> usize {
+ const AT_MINSIGSTKSZ: core::ffi::c_ulong = 51;
+ let dynamic_sigstksz = unsafe { libc::getauxval(AT_MINSIGSTKSZ) };
+ // If getauxval couldn't find the entry, it returns 0,
+ // so take the higher of the "constant" and auxval.
+ // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
+ libc::MINSIGSTKSZ.max(dynamic_sigstksz as _)
+ }
+
+ /// Not all OS support hardware where this is needed.
+ #[cfg(not(any(target_os = "linux", target_os = "android")))]
+ fn min_sigstack_size() -> usize {
+ libc::MINSIGSTKSZ
+ }
}
#[cfg(not(all(unix, any(target_env = "gnu", target_os = "macos"))))]
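The dynamic signal-stack query in isolation (Linux/Android only, assumes the `libc` crate): `getauxval` returns 0 when `AT_MINSIGSTKSZ` is absent, so taking the max keeps older kernels working.

    #[cfg(any(target_os = "linux", target_os = "android"))]
    fn min_sigstack_size() -> usize {
        const AT_MINSIGSTKSZ: libc::c_ulong = 51;
        // 0 means the aux vector has no entry; fall back to the libc constant.
        let dynamic = unsafe { libc::getauxval(AT_MINSIGSTKSZ) };
        libc::MINSIGSTKSZ.max(dynamic as usize)
    }

    #[cfg(any(target_os = "linux", target_os = "android"))]
    fn main() {
        println!("minimum signal stack: {} bytes", min_sigstack_size());
    }

    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    fn main() {}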
diff --git a/compiler/rustc_driver_impl/src/pretty.rs b/compiler/rustc_driver_impl/src/pretty.rs
index 24a5f4030..222c7b5d6 100644
--- a/compiler/rustc_driver_impl/src/pretty.rs
+++ b/compiler/rustc_driver_impl/src/pretty.rs
@@ -1,6 +1,5 @@
//! The various pretty-printing routines.
-use crate::session_diagnostics::UnprettyDumpFail;
use rustc_ast as ast;
use rustc_ast_pretty::pprust;
use rustc_errors::ErrorGuaranteed;
@@ -358,17 +357,7 @@ fn get_source(sess: &Session) -> (String, FileName) {
}
fn write_or_print(out: &str, sess: &Session) {
- match &sess.io.output_file {
- None | Some(OutFileName::Stdout) => print!("{out}"),
- Some(OutFileName::Real(p)) => {
- if let Err(e) = std::fs::write(p, out) {
- sess.emit_fatal(UnprettyDumpFail {
- path: p.display().to_string(),
- err: e.to_string(),
- });
- }
- }
- }
+ sess.io.output_file.as_ref().unwrap_or(&OutFileName::Stdout).overwrite(out, sess);
}
pub fn print_after_parsing(sess: &Session, krate: &ast::Crate, ppm: PpMode) {
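`write_or_print` now defers to the output target itself. A sketch of that dispatch with hypothetical types (the real `OutFileName::overwrite` lives in rustc_session):

    use std::path::PathBuf;

    enum OutTarget {
        Stdout,
        Real(PathBuf),
    }

    impl OutTarget {
        // Write the whole buffer to the chosen destination.
        fn overwrite(&self, out: &str) -> std::io::Result<()> {
            match self {
                OutTarget::Stdout => {
                    print!("{out}");
                    Ok(())
                }
                OutTarget::Real(path) => std::fs::write(path, out),
            }
        }
    }

    fn main() -> std::io::Result<()> {
        let output_file: Option<OutTarget> = None;
        // Default to stdout unless an output file was requested.
        output_file.as_ref().unwrap_or(&OutTarget::Stdout).overwrite("pretty-printed output\n")
    }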
diff --git a/compiler/rustc_driver_impl/src/session_diagnostics.rs b/compiler/rustc_driver_impl/src/session_diagnostics.rs
index 638b368f7..5eb587c54 100644
--- a/compiler/rustc_driver_impl/src/session_diagnostics.rs
+++ b/compiler/rustc_driver_impl/src/session_diagnostics.rs
@@ -1,4 +1,4 @@
-use rustc_macros::Diagnostic;
+use rustc_macros::{Diagnostic, Subdiagnostic};
#[derive(Diagnostic)]
#[diag(driver_impl_rlink_unable_to_read)]
@@ -33,13 +33,6 @@ pub(crate) struct RLinkRustcVersionMismatch<'a> {
pub(crate) struct RlinkNotAFile;
#[derive(Diagnostic)]
-#[diag(driver_impl_unpretty_dump_fail)]
-pub(crate) struct UnprettyDumpFail {
- pub path: String,
- pub err: String,
-}
-
-#[derive(Diagnostic)]
#[diag(driver_impl_ice)]
pub(crate) struct Ice;
@@ -57,6 +50,27 @@ pub(crate) struct IceVersion<'a> {
}
#[derive(Diagnostic)]
+#[diag(driver_impl_ice_path)]
+pub(crate) struct IcePath {
+ pub path: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(driver_impl_ice_path_error)]
+pub(crate) struct IcePathError {
+ pub path: String,
+ pub error: String,
+ #[subdiagnostic]
+ pub env_var: Option<IcePathErrorEnv>,
+}
+
+#[derive(Subdiagnostic)]
+#[note(driver_impl_ice_path_error_env)]
+pub(crate) struct IcePathErrorEnv {
+ pub env_var: String,
+}
+
+#[derive(Diagnostic)]
#[diag(driver_impl_ice_flags)]
pub(crate) struct IceFlags {
pub flags: String,
diff --git a/compiler/rustc_error_codes/src/error_codes/E0092.md b/compiler/rustc_error_codes/src/error_codes/E0092.md
index 5cbe2a188..84ec0656d 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0092.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0092.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0092
#![feature(intrinsics)]
+#![allow(internal_features)]
extern "rust-intrinsic" {
fn atomic_foo(); // error: unrecognized atomic operation
@@ -17,6 +18,7 @@ functions are defined in `compiler/rustc_codegen_llvm/src/intrinsic.rs` and in
```
#![feature(intrinsics)]
+#![allow(internal_features)]
extern "rust-intrinsic" {
fn atomic_fence_seqcst(); // ok!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0093.md b/compiler/rustc_error_codes/src/error_codes/E0093.md
index b1683cf4f..2bda4d74f 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0093.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0093.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0093
#![feature(intrinsics)]
+#![allow(internal_features)]
extern "rust-intrinsic" {
fn foo(); // error: unrecognized intrinsic function: `foo`
@@ -22,6 +23,7 @@ functions are defined in `compiler/rustc_codegen_llvm/src/intrinsic.rs` and in
```
#![feature(intrinsics)]
+#![allow(internal_features)]
extern "rust-intrinsic" {
fn atomic_fence_seqcst(); // ok!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0094.md b/compiler/rustc_error_codes/src/error_codes/E0094.md
index cc546bdbb..67a8c3678 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0094.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0094.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0094
#![feature(intrinsics)]
+#![allow(internal_features)]
extern "rust-intrinsic" {
#[rustc_safe_intrinsic]
@@ -18,6 +19,7 @@ Example:
```
#![feature(intrinsics)]
+#![allow(internal_features)]
extern "rust-intrinsic" {
#[rustc_safe_intrinsic]
diff --git a/compiler/rustc_error_codes/src/error_codes/E0132.md b/compiler/rustc_error_codes/src/error_codes/E0132.md
index a23cc988b..51258739b 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0132.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0132.md
@@ -13,7 +13,7 @@ It is not possible to declare type parameters on a function that has the `start`
attribute. Such a function must have the following type signature (for more
information, view [the unstable book][1]):
-[1]: https://doc.rust-lang.org/unstable-book/language-features/lang-items.html#writing-an-executable-without-stdlib
+[1]: https://doc.rust-lang.org/unstable-book/language-features/start.html
```
# let _:
diff --git a/compiler/rustc_error_codes/src/error_codes/E0152.md b/compiler/rustc_error_codes/src/error_codes/E0152.md
index ef17b8b4c..d86276657 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0152.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0152.md
@@ -20,6 +20,6 @@ attributes:
#![no_std]
```
-See also the [unstable book][1].
+See also [this section of the Rustonomicon][beneath std].
-[1]: https://doc.rust-lang.org/unstable-book/language-features/lang-items.html#writing-an-executable-without-stdlib
+[beneath std]: https://doc.rust-lang.org/nomicon/beneath-std.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0208.md b/compiler/rustc_error_codes/src/error_codes/E0208.md
index c6db9b5d6..2b811b4b8 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0208.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0208.md
@@ -8,6 +8,7 @@ Erroneous code example:
```compile_fail
// NOTE: this feature is perma-unstable and should *only* be used for
// testing purposes.
+#![allow(internal_features)]
#![feature(rustc_attrs)]
#[rustc_variance]
diff --git a/compiler/rustc_error_codes/src/error_codes/E0211.md b/compiler/rustc_error_codes/src/error_codes/E0211.md
index 8c2462ebd..70f14fffa 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0211.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0211.md
@@ -5,6 +5,7 @@ used. Erroneous code examples:
```compile_fail
#![feature(intrinsics)]
+#![allow(internal_features)]
extern "rust-intrinsic" {
#[rustc_safe_intrinsic]
@@ -41,6 +42,7 @@ For the first code example, please check the function definition. Example:
```
#![feature(intrinsics)]
+#![allow(internal_features)]
extern "rust-intrinsic" {
#[rustc_safe_intrinsic]
diff --git a/compiler/rustc_error_codes/src/error_codes/E0230.md b/compiler/rustc_error_codes/src/error_codes/E0230.md
index cfb72e743..87ea90e73 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0230.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0230.md
@@ -5,6 +5,7 @@ compiled:
```compile_fail,E0230
#![feature(rustc_attrs)]
+#![allow(internal_features)]
#[rustc_on_unimplemented = "error on `{Self}` with params `<{A},{B}>`"] // error
trait BadAnnotation<A> {}
diff --git a/compiler/rustc_error_codes/src/error_codes/E0231.md b/compiler/rustc_error_codes/src/error_codes/E0231.md
index 23a0a88ec..a1aaf90df 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0231.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0231.md
@@ -5,6 +5,7 @@ compiled:
```compile_fail,E0231
#![feature(rustc_attrs)]
+#![allow(internal_features)]
#[rustc_on_unimplemented = "error on `{Self}` with params `<{A},{}>`"] // error!
trait BadAnnotation<A> {}
diff --git a/compiler/rustc_error_codes/src/error_codes/E0232.md b/compiler/rustc_error_codes/src/error_codes/E0232.md
index b310caefa..0e50cf589 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0232.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0232.md
@@ -5,6 +5,7 @@ compiled:
```compile_fail,E0232
#![feature(rustc_attrs)]
+#![allow(internal_features)]
#[rustc_on_unimplemented(lorem="")] // error!
trait BadAnnotation {}
diff --git a/compiler/rustc_error_codes/src/error_codes/E0264.md b/compiler/rustc_error_codes/src/error_codes/E0264.md
index e2a27f7b1..d79060762 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0264.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0264.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0264
#![feature(lang_items)]
+#![allow(internal_features)]
extern "C" {
#[lang = "cake"] // error: unknown external lang item: `cake`
@@ -16,6 +17,7 @@ A list of available external lang items is available in
```
#![feature(lang_items)]
+#![allow(internal_features)]
extern "C" {
#[lang = "panic_impl"] // ok!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0391.md b/compiler/rustc_error_codes/src/error_codes/E0391.md
index dff50ccaa..457fbd002 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0391.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0391.md
@@ -14,3 +14,6 @@ trait SecondTrait : FirstTrait {
The previous example contains a circular dependency between two traits:
`FirstTrait` depends on `SecondTrait` which itself depends on `FirstTrait`.
+
+See https://rustc-dev-guide.rust-lang.org/overview.html#queries and
+https://rustc-dev-guide.rust-lang.org/query.html for more information.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0439.md b/compiler/rustc_error_codes/src/error_codes/E0439.md
index 24268aef2..369226c38 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0439.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0439.md
@@ -16,7 +16,7 @@ extern "platform-intrinsic" {
The `simd_shuffle` function needs the length of the array passed as
last parameter in its name. Example:
-```
+```ignore (no longer compiles)
#![feature(platform_intrinsics)]
extern "platform-intrinsic" {
diff --git a/compiler/rustc_error_codes/src/error_codes/E0539.md b/compiler/rustc_error_codes/src/error_codes/E0539.md
index c53d60a5f..cd28afbc4 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0539.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0539.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0539
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[deprecated(note)] // error!
@@ -28,6 +29,7 @@ To fix these issues you need to give required key-value pairs.
```
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[deprecated(since = "1.39.0", note = "reason")] // ok!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0542.md b/compiler/rustc_error_codes/src/error_codes/E0542.md
index c69e57417..be186dbd2 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0542.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0542.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0542
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[stable(feature = "_stable_fn")] // invalid
@@ -23,6 +24,7 @@ To fix this issue, you need to provide the `since` field. Example:
```
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[stable(feature = "_stable_fn", since = "1.0.0")] // ok!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0543.md b/compiler/rustc_error_codes/src/error_codes/E0543.md
index d0b2e2f7a..5051c72a1 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0543.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0543.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0543
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[stable(since = "0.1.0", feature = "_deprecated_fn")]
@@ -17,6 +18,7 @@ To fix this issue, you need to provide the `note` field. Example:
```
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[stable(since = "0.1.0", feature = "_deprecated_fn")]
diff --git a/compiler/rustc_error_codes/src/error_codes/E0544.md b/compiler/rustc_error_codes/src/error_codes/E0544.md
index 2227e2a06..202401f9d 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0544.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0544.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0544
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "rust1")]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -15,6 +16,7 @@ To fix this issue, ensure that each item has at most one stability attribute.
```
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "rust1")]
#[stable(feature = "test", since = "2.0.0")] // ok!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0545.md b/compiler/rustc_error_codes/src/error_codes/E0545.md
index 7aba084f4..880378ebd 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0545.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0545.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0545
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[unstable(feature = "_unstable_fn", issue = "0")] // invalid
@@ -18,6 +19,7 @@ Example:
```
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[unstable(feature = "_unstable_fn", issue = "none")] // ok!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0546.md b/compiler/rustc_error_codes/src/error_codes/E0546.md
index a33dcb7a9..8c98eaa07 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0546.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0546.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0546
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[unstable(issue = "none")] // invalid
@@ -17,6 +18,7 @@ To fix this issue, you need to provide the `feature` field. Example:
```
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[unstable(feature = "unstable_fn", issue = "none")] // ok!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0547.md b/compiler/rustc_error_codes/src/error_codes/E0547.md
index 4950325df..5b0f7cd44 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0547.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0547.md
@@ -4,6 +4,7 @@ Erroneous code example:
```compile_fail,E0547
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[unstable(feature = "_unstable_fn")] // invalid
@@ -17,6 +18,7 @@ To fix this issue, you need to provide the `issue` field. Example:
```
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[unstable(feature = "_unstable_fn", issue = "none")] // ok!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0549.md b/compiler/rustc_error_codes/src/error_codes/E0549.md
index 70e458a98..cc6a47fe2 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0549.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0549.md
@@ -5,6 +5,7 @@ Erroneous code example:
```compile_fail,E0549
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[deprecated(
@@ -19,6 +20,7 @@ Example:
```
#![feature(staged_api)]
+#![allow(internal_features)]
#![stable(since = "1.0.0", feature = "test")]
#[stable(since = "1.0.0", feature = "test")]
diff --git a/compiler/rustc_error_codes/src/error_codes/E0577.md b/compiler/rustc_error_codes/src/error_codes/E0577.md
index eba2d3b14..383ca61f6 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0577.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0577.md
@@ -3,7 +3,7 @@ Something other than a module was found in visibility scope.
Erroneous code example:
```compile_fail,E0577,edition2018
-pub struct Sea;
+pub enum Sea {}
pub (in crate::Sea) struct Shark; // error!
diff --git a/compiler/rustc_error_codes/src/error_codes/E0622.md b/compiler/rustc_error_codes/src/error_codes/E0622.md
index 3ba3ed10e..5d71ee994 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0622.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0622.md
@@ -4,6 +4,8 @@ Erroneous code example:
```compile_fail,E0622
#![feature(intrinsics)]
+#![allow(internal_features)]
+
extern "rust-intrinsic" {
pub static breakpoint: fn(); // error: intrinsic must be a function
}
@@ -17,6 +19,8 @@ error, just declare a function. Example:
```no_run
#![feature(intrinsics)]
+#![allow(internal_features)]
+
extern "rust-intrinsic" {
pub fn breakpoint(); // ok!
}
diff --git a/compiler/rustc_error_codes/src/error_codes/E0691.md b/compiler/rustc_error_codes/src/error_codes/E0691.md
index 60060cacb..483c74c0f 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0691.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0691.md
@@ -11,7 +11,8 @@ struct ForceAlign32;
#[repr(transparent)]
struct Wrapper(f32, ForceAlign32); // error: zero-sized field in transparent
- // struct has alignment larger than 1
+ // struct has alignment of 32, which
+ // is larger than 1
```
A transparent struct, enum, or union is supposed to be represented exactly like
diff --git a/compiler/rustc_error_codes/src/error_codes/E0773.md b/compiler/rustc_error_codes/src/error_codes/E0773.md
index b19a58bf3..aa65a475a 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0773.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0773.md
@@ -5,6 +5,7 @@ Erroneous code example:
```compile_fail,E0773
#![feature(decl_macro)]
#![feature(rustc_attrs)]
+#![allow(internal_features)]
#[rustc_builtin_macro]
pub macro test($item:item) {
@@ -24,6 +25,7 @@ To fix the issue, remove the duplicate declaration:
```
#![feature(decl_macro)]
#![feature(rustc_attrs)]
+#![allow(internal_features)]
#[rustc_builtin_macro]
pub macro test($item:item) {
diff --git a/compiler/rustc_error_codes/src/error_codes/E0789.md b/compiler/rustc_error_codes/src/error_codes/E0789.md
index 89b7cd422..2c0018cc7 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0789.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0789.md
@@ -10,6 +10,7 @@ Erroneous code example:
// used outside of the compiler and standard library.
#![feature(rustc_attrs)]
#![feature(staged_api)]
+#![allow(internal_features)]
#![unstable(feature = "foo_module", reason = "...", issue = "123")]
diff --git a/compiler/rustc_error_messages/src/lib.rs b/compiler/rustc_error_messages/src/lib.rs
index 51e1fe531..3bf155050 100644
--- a/compiler/rustc_error_messages/src/lib.rs
+++ b/compiler/rustc_error_messages/src/lib.rs
@@ -4,6 +4,7 @@
#![feature(type_alias_impl_trait)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate tracing;
@@ -71,17 +72,17 @@ pub enum TranslationBundleError {
impl fmt::Display for TranslationBundleError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
- TranslationBundleError::ReadFtl(e) => write!(f, "could not read ftl file: {}", e),
+ TranslationBundleError::ReadFtl(e) => write!(f, "could not read ftl file: {e}"),
TranslationBundleError::ParseFtl(e) => {
- write!(f, "could not parse ftl file: {}", e)
+ write!(f, "could not parse ftl file: {e}")
}
- TranslationBundleError::AddResource(e) => write!(f, "failed to add resource: {}", e),
+ TranslationBundleError::AddResource(e) => write!(f, "failed to add resource: {e}"),
TranslationBundleError::MissingLocale => write!(f, "missing locale directory"),
TranslationBundleError::ReadLocalesDir(e) => {
- write!(f, "could not read locales dir: {}", e)
+ write!(f, "could not read locales dir: {e}")
}
TranslationBundleError::ReadLocalesDirEntry(e) => {
- write!(f, "could not read locales dir entry: {}", e)
+ write!(f, "could not read locales dir entry: {e}")
}
TranslationBundleError::LocaleIsNotDir => {
write!(f, "`$sysroot/share/locales/$locale` is not a directory")
@@ -354,6 +355,13 @@ impl DiagnosticMessage {
}
}
}
+
+ pub fn as_str(&self) -> Option<&str> {
+ match self {
+ DiagnosticMessage::Eager(s) | DiagnosticMessage::Str(s) => Some(s),
+ DiagnosticMessage::FluentIdentifier(_, _) => None,
+ }
+ }
}
impl From<String> for DiagnosticMessage {
@@ -526,6 +534,14 @@ impl MultiSpan {
pub fn has_span_labels(&self) -> bool {
self.span_labels.iter().any(|(sp, _)| !sp.is_dummy())
}
+
+ /// Clone this `MultiSpan` without keeping any of the span labels - sometimes a `MultiSpan` is
+ /// to be re-used in another diagnostic, but includes `span_labels` which have translated
+ /// messages. These translated messages would fail to translate without their diagnostic
+ /// arguments which are unlikely to be cloned alongside the `Span`.
+ pub fn clone_ignoring_labels(&self) -> Self {
+ Self { primary_spans: self.primary_spans.clone(), ..MultiSpan::new() }
+ }
}
impl From<Span> for MultiSpan {
diff --git a/compiler/rustc_errors/Cargo.toml b/compiler/rustc_errors/Cargo.toml
index e8bcd7c11..faab9f09d 100644
--- a/compiler/rustc_errors/Cargo.toml
+++ b/compiler/rustc_errors/Cargo.toml
@@ -25,6 +25,7 @@ annotate-snippets = "0.9"
termize = "0.1.1"
serde = { version = "1.0.125", features = [ "derive" ] }
serde_json = "1.0.59"
+derive_setters = "0.1.6"
[target.'cfg(windows)'.dependencies.windows]
version = "0.48.0"
diff --git a/compiler/rustc_errors/messages.ftl b/compiler/rustc_errors/messages.ftl
index 8e8223c3c..d68dba0be 100644
--- a/compiler/rustc_errors/messages.ftl
+++ b/compiler/rustc_errors/messages.ftl
@@ -1,3 +1,25 @@
+errors_delayed_at_with_newline =
+ delayed at {$emitted_at}
+ {$note}
+
+errors_delayed_at_without_newline =
+ delayed at {$emitted_at} - {$note}
+
+errors_expected_lifetime_parameter =
+ expected lifetime {$count ->
+ [1] parameter
+ *[other] parameters
+ }
+
+errors_indicate_anonymous_lifetime =
+ indicate the anonymous {$count ->
+ [1] lifetime
+ *[other] lifetimes
+ }
+
+errors_invalid_flushed_delayed_diagnostic_level =
+ `flushed_delayed` got diagnostic with level {$level}, instead of the expected `DelayedBug`
+
errors_target_inconsistent_architecture =
inconsistent target specification: "data-layout" claims architecture is {$dl}-endian, while "target-endian" is `{$target}`
diff --git a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
index 9872b3bda..a88fba6da 100644
--- a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
+++ b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
@@ -157,10 +157,8 @@ impl AnnotateSnippetEmitterWriter {
{
annotated_files.swap(0, pos);
}
- // owned: line source, line index, annotations
- type Owned = (String, usize, Vec<crate::snippet::Annotation>);
- let filename = source_map.filename_for_diagnostics(&primary_lo.file.name);
- let origin = filename.to_string_lossy();
+ // owned: file name, line source, line index, annotations
+ type Owned = (String, String, usize, Vec<crate::snippet::Annotation>);
let annotated_files: Vec<Owned> = annotated_files
.into_iter()
.flat_map(|annotated_file| {
@@ -169,7 +167,15 @@ impl AnnotateSnippetEmitterWriter {
.lines
.into_iter()
.map(|line| {
- (source_string(file.clone(), &line), line.line_index, line.annotations)
+ // Ensure the source file is present before we try
+ // to load a string from it.
+ source_map.ensure_source_file_source_present(file.clone());
+ (
+ format!("{}", source_map.filename_for_diagnostics(&file.name)),
+ source_string(file.clone(), &line),
+ line.line_index,
+ line.annotations,
+ )
})
.collect::<Vec<Owned>>()
})
@@ -192,11 +198,11 @@ impl AnnotateSnippetEmitterWriter {
},
slices: annotated_files
.iter()
- .map(|(source, line_index, annotations)| {
+ .map(|(file_name, source, line_index, annotations)| {
Slice {
source,
line_start: *line_index,
- origin: Some(&origin),
+ origin: Some(&file_name),
// FIXME(#59346): Not really sure when `fold` should be true or false
fold: false,
annotations: annotations
diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs
index ed0d06ed0..a96e317df 100644
--- a/compiler/rustc_errors/src/diagnostic.rs
+++ b/compiler/rustc_errors/src/diagnostic.rs
@@ -420,13 +420,13 @@ impl Diagnostic {
let expected_label = if expected_label.is_empty() {
"expected".to_string()
} else {
- format!("expected {}", expected_label)
+ format!("expected {expected_label}")
};
let found_label = found_label.to_string();
let found_label = if found_label.is_empty() {
"found".to_string()
} else {
- format!("found {}", found_label)
+ format!("found {found_label}")
};
let (found_padding, expected_padding) = if expected_label.len() > found_label.len() {
(expected_label.len() - found_label.len(), 0)
@@ -439,13 +439,13 @@ impl Diagnostic {
StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
}));
- msg.push((format!("`{}\n", expected_extra), Style::NoStyle));
+ msg.push((format!("`{expected_extra}\n"), Style::NoStyle));
msg.push((format!("{}{} `", " ".repeat(found_padding), found_label), Style::NoStyle));
msg.extend(found.0.iter().map(|x| match *x {
StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
}));
- msg.push((format!("`{}", found_extra), Style::NoStyle));
+ msg.push((format!("`{found_extra}"), Style::NoStyle));
// For now, just attach these as notes.
self.highlighted_note(msg);
@@ -454,7 +454,7 @@ impl Diagnostic {
pub fn note_trait_signature(&mut self, name: Symbol, signature: String) -> &mut Self {
self.highlighted_note(vec![
- (format!("`{}` from trait: `", name), Style::NoStyle),
+ (format!("`{name}` from trait: `"), Style::NoStyle),
(signature, Style::Highlight),
("`".to_string(), Style::NoStyle),
]);
diff --git a/compiler/rustc_errors/src/diagnostic_builder.rs b/compiler/rustc_errors/src/diagnostic_builder.rs
index 08ff2cfba..5e23ae655 100644
--- a/compiler/rustc_errors/src/diagnostic_builder.rs
+++ b/compiler/rustc_errors/src/diagnostic_builder.rs
@@ -536,7 +536,9 @@ impl<'a, G: EmissionGuarantee> DiagnosticBuilder<'a, G> {
}
};
- if handler.flags.dont_buffer_diagnostics || handler.flags.treat_err_as_bug.is_some() {
+ if handler.inner.lock().flags.dont_buffer_diagnostics
+ || handler.inner.lock().flags.treat_err_as_bug.is_some()
+ {
self.emit();
return None;
}
diff --git a/compiler/rustc_errors/src/diagnostic_impls.rs b/compiler/rustc_errors/src/diagnostic_impls.rs
index 10fe7fc74..a170e3a89 100644
--- a/compiler/rustc_errors/src/diagnostic_impls.rs
+++ b/compiler/rustc_errors/src/diagnostic_impls.rs
@@ -1,3 +1,4 @@
+use crate::diagnostic::DiagnosticLocation;
use crate::{fluent_generated as fluent, AddToDiagnostic};
use crate::{DiagnosticArgValue, DiagnosticBuilder, Handler, IntoDiagnostic, IntoDiagnosticArg};
use rustc_ast as ast;
@@ -10,6 +11,7 @@ use rustc_span::Span;
use rustc_target::abi::TargetDataLayoutErrors;
use rustc_target::spec::{PanicStrategy, SplitDebuginfo, StackProtector, TargetTriple};
use rustc_type_ir as type_ir;
+use std::backtrace::Backtrace;
use std::borrow::Cow;
use std::fmt;
use std::num::ParseIntError;
@@ -102,7 +104,7 @@ impl IntoDiagnosticArg for bool {
impl IntoDiagnosticArg for char {
fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- DiagnosticArgValue::Str(Cow::Owned(format!("{:?}", self)))
+ DiagnosticArgValue::Str(Cow::Owned(format!("{self:?}")))
}
}
@@ -164,6 +166,12 @@ impl IntoDiagnosticArg for hir::ConstContext {
}
}
+impl IntoDiagnosticArg for ast::Expr {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(pprust::expr_to_string(&self)))
+ }
+}
+
impl IntoDiagnosticArg for ast::Path {
fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
DiagnosticArgValue::Str(Cow::Owned(pprust::path_to_string(&self)))
@@ -311,3 +319,62 @@ pub enum LabelKind {
Label,
Help,
}
+
+#[derive(Subdiagnostic)]
+#[label(errors_expected_lifetime_parameter)]
+pub struct ExpectedLifetimeParameter {
+ #[primary_span]
+ pub span: Span,
+ pub count: usize,
+}
+
+#[derive(Subdiagnostic)]
+#[note(errors_delayed_at_with_newline)]
+pub struct DelayedAtWithNewline {
+ #[primary_span]
+ pub span: Span,
+ pub emitted_at: DiagnosticLocation,
+ pub note: Backtrace,
+}
+#[derive(Subdiagnostic)]
+#[note(errors_delayed_at_without_newline)]
+pub struct DelayedAtWithoutNewline {
+ #[primary_span]
+ pub span: Span,
+ pub emitted_at: DiagnosticLocation,
+ pub note: Backtrace,
+}
+
+impl IntoDiagnosticArg for DiagnosticLocation {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::from(self.to_string()))
+ }
+}
+
+impl IntoDiagnosticArg for Backtrace {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::from(self.to_string()))
+ }
+}
+
+#[derive(Subdiagnostic)]
+#[note(errors_invalid_flushed_delayed_diagnostic_level)]
+pub struct InvalidFlushedDelayedDiagnosticLevel {
+ #[primary_span]
+ pub span: Span,
+ pub level: rustc_errors::Level,
+}
+impl IntoDiagnosticArg for rustc_errors::Level {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::from(self.to_string()))
+ }
+}
+
+#[derive(Subdiagnostic)]
+#[suggestion(errors_indicate_anonymous_lifetime, code = "{suggestion}", style = "verbose")]
+pub struct IndicateAnonymousLifetime {
+ #[primary_span]
+ pub span: Span,
+ pub count: usize,
+ pub suggestion: String,
+}
diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs
index 9d4d159fd..0cae06881 100644
--- a/compiler/rustc_errors/src/emitter.rs
+++ b/compiler/rustc_errors/src/emitter.rs
@@ -7,8 +7,6 @@
//!
//! The output types are defined in `rustc_session::config::ErrorOutputType`.
-use Destination::*;
-
use rustc_span::source_map::SourceMap;
use rustc_span::{FileLines, SourceFile, Span};
@@ -24,6 +22,7 @@ use crate::{
};
use rustc_lint_defs::pluralize;
+use derive_setters::Setters;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::sync::Lrc;
use rustc_error_messages::{FluentArgs, SpanLabel};
@@ -35,8 +34,8 @@ use std::io::prelude::*;
use std::io::{self, IsTerminal};
use std::iter;
use std::path::Path;
-use termcolor::{Ansi, BufferWriter, ColorChoice, ColorSpec, StandardStream};
-use termcolor::{Buffer, Color, WriteColor};
+use termcolor::{Ansi, Buffer, BufferWriter, ColorChoice, ColorSpec, StandardStream};
+use termcolor::{Color, WriteColor};
/// Default column width, used in tests and when terminal dimensions cannot be determined.
const DEFAULT_COLUMN_WIDTH: usize = 140;
@@ -60,31 +59,15 @@ impl HumanReadableErrorType {
}
pub fn new_emitter(
self,
- dst: Box<dyn Write + Send>,
- source_map: Option<Lrc<SourceMap>>,
- bundle: Option<Lrc<FluentBundle>>,
+ mut dst: Box<dyn WriteColor + Send>,
fallback_bundle: LazyFallbackBundle,
- teach: bool,
- diagnostic_width: Option<usize>,
- macro_backtrace: bool,
- track_diagnostics: bool,
- terminal_url: TerminalUrl,
) -> EmitterWriter {
let (short, color_config) = self.unzip();
let color = color_config.suggests_using_colors();
- EmitterWriter::new(
- dst,
- source_map,
- bundle,
- fallback_bundle,
- short,
- teach,
- color,
- diagnostic_width,
- macro_backtrace,
- track_diagnostics,
- terminal_url,
- )
+ if !dst.supports_color() && color {
+ dst = Box::new(Ansi::new(dst));
+ }
+ EmitterWriter::new(dst, fallback_bundle).short_message(short)
}
}
@@ -279,12 +262,12 @@ pub trait Emitter: Translate {
let msg = if substitution.is_empty() || sugg.style.hide_inline() {
// This substitution is only removal OR we explicitly don't want to show the
// code inline (`hide_inline`). Therefore, we don't show the substitution.
- format!("help: {}", &msg)
+ format!("help: {msg}")
} else {
// Show the default suggestion text with the substitution
format!(
"help: {}{}: `{}`",
- &msg,
+ msg,
if self.source_map().is_some_and(|sm| is_case_difference(
sm,
substitution,
@@ -639,10 +622,13 @@ impl ColorConfig {
}
/// Handles the writing of `HumanReadableErrorType::Default` and `HumanReadableErrorType::Short`
+#[derive(Setters)]
pub struct EmitterWriter {
+ #[setters(skip)]
dst: Destination,
sm: Option<Lrc<SourceMap>>,
fluent_bundle: Option<Lrc<FluentBundle>>,
+ #[setters(skip)]
fallback_bundle: LazyFallbackBundle,
short_message: bool,
teach: bool,
@@ -662,65 +648,32 @@ pub struct FileWithAnnotatedLines {
}
impl EmitterWriter {
- pub fn stderr(
- color_config: ColorConfig,
- source_map: Option<Lrc<SourceMap>>,
- fluent_bundle: Option<Lrc<FluentBundle>>,
- fallback_bundle: LazyFallbackBundle,
- short_message: bool,
- teach: bool,
- diagnostic_width: Option<usize>,
- macro_backtrace: bool,
- track_diagnostics: bool,
- terminal_url: TerminalUrl,
- ) -> EmitterWriter {
- let dst = Destination::from_stderr(color_config);
+ pub fn stderr(color_config: ColorConfig, fallback_bundle: LazyFallbackBundle) -> EmitterWriter {
+ let dst = from_stderr(color_config);
+ Self::create(dst, fallback_bundle)
+ }
+
+ fn create(dst: Destination, fallback_bundle: LazyFallbackBundle) -> EmitterWriter {
EmitterWriter {
dst,
- sm: source_map,
- fluent_bundle,
+ sm: None,
+ fluent_bundle: None,
fallback_bundle,
- short_message,
- teach,
+ short_message: false,
+ teach: false,
ui_testing: false,
- diagnostic_width,
- macro_backtrace,
- track_diagnostics,
- terminal_url,
+ diagnostic_width: None,
+ macro_backtrace: false,
+ track_diagnostics: false,
+ terminal_url: TerminalUrl::No,
}
}
pub fn new(
- dst: Box<dyn Write + Send>,
- source_map: Option<Lrc<SourceMap>>,
- fluent_bundle: Option<Lrc<FluentBundle>>,
+ dst: Box<dyn WriteColor + Send>,
fallback_bundle: LazyFallbackBundle,
- short_message: bool,
- teach: bool,
- colored: bool,
- diagnostic_width: Option<usize>,
- macro_backtrace: bool,
- track_diagnostics: bool,
- terminal_url: TerminalUrl,
) -> EmitterWriter {
- EmitterWriter {
- dst: Raw(dst, colored),
- sm: source_map,
- fluent_bundle,
- fallback_bundle,
- short_message,
- teach,
- ui_testing: false,
- diagnostic_width,
- macro_backtrace,
- track_diagnostics,
- terminal_url,
- }
- }
-
- pub fn ui_testing(mut self, ui_testing: bool) -> Self {
- self.ui_testing = ui_testing;
- self
+ Self::create(dst, fallback_bundle)
}
fn maybe_anonymized(&self, line_num: usize) -> Cow<'static, str> {
@@ -1982,7 +1935,7 @@ impl EmitterWriter {
// We special case `#[derive(_)]\n` and other attribute suggestions, because those
// are the ones where context is most useful.
let file_lines = sm
- .span_to_lines(span.primary_span().unwrap().shrink_to_hi())
+ .span_to_lines(parts[0].span.shrink_to_hi())
.expect("span_to_lines failed when emitting suggestion");
let line_num = sm.lookup_char_pos(parts[0].span.lo()).line;
if let Some(line) = file_lines.file.get_line(line_num - 1) {
@@ -2145,7 +2098,7 @@ impl EmitterWriter {
&mut self.dst,
self.short_message,
) {
- panic!("failed to emit error: {}", e)
+ panic!("failed to emit error: {e}")
}
}
if !self.short_message {
@@ -2161,7 +2114,7 @@ impl EmitterWriter {
true,
None,
) {
- panic!("failed to emit error: {}", err);
+ panic!("failed to emit error: {err}");
}
}
for sugg in suggestions {
@@ -2180,7 +2133,7 @@ impl EmitterWriter {
true,
None,
) {
- panic!("failed to emit error: {}", e);
+ panic!("failed to emit error: {e}");
}
}
SuggestionStyle::HideCodeInline
@@ -2193,22 +2146,21 @@ impl EmitterWriter {
&Level::Help,
max_line_num_len,
) {
- panic!("failed to emit error: {}", e);
+ panic!("failed to emit error: {e}");
}
}
}
}
}
}
- Err(e) => panic!("failed to emit error: {}", e),
+ Err(e) => panic!("failed to emit error: {e}"),
}
- let mut dst = self.dst.writable();
- match writeln!(dst) {
- Err(e) => panic!("failed to emit error: {}", e),
+ match writeln!(self.dst) {
+ Err(e) => panic!("failed to emit error: {e}"),
_ => {
- if let Err(e) = dst.flush() {
- panic!("failed to emit error: {}", e)
+ if let Err(e) = self.dst.flush() {
+ panic!("failed to emit error: {e}")
}
}
}
@@ -2618,8 +2570,6 @@ fn emit_to_destination(
) -> io::Result<()> {
use crate::lock;
- let mut dst = dst.writable();
-
// In order to prevent error message interleaving, where multiple error lines get intermixed
// when multiple compiler processes error simultaneously, we emit errors with additional
// steps.
@@ -2635,7 +2585,8 @@ fn emit_to_destination(
let _buffer_lock = lock::acquire_global_lock("rustc_errors");
for (pos, line) in rendered_buffer.iter().enumerate() {
for part in line {
- dst.apply_style(*lvl, part.style)?;
+ let style = part.style.color_spec(*lvl);
+ dst.set_color(&style)?;
write!(dst, "{}", part.text)?;
dst.reset()?;
}
@@ -2647,61 +2598,69 @@ fn emit_to_destination(
Ok(())
}
-pub enum Destination {
- Terminal(StandardStream),
- Buffered(BufferWriter),
- // The bool denotes whether we should be emitting ansi color codes or not
- Raw(Box<(dyn Write + Send)>, bool),
-}
+pub type Destination = Box<(dyn WriteColor + Send)>;
-pub enum WritableDst<'a> {
- Terminal(&'a mut StandardStream),
- Buffered(&'a mut BufferWriter, Buffer),
- Raw(&'a mut (dyn Write + Send)),
- ColoredRaw(Ansi<&'a mut (dyn Write + Send)>),
+struct Buffy {
+ buffer_writer: BufferWriter,
+ buffer: Buffer,
}
-impl Destination {
- fn from_stderr(color: ColorConfig) -> Destination {
- let choice = color.to_color_choice();
- // On Windows we'll be performing global synchronization on the entire
- // system for emitting rustc errors, so there's no need to buffer
- // anything.
- //
- // On non-Windows we rely on the atomicity of `write` to ensure errors
- // don't get all jumbled up.
- if cfg!(windows) {
- Terminal(StandardStream::stderr(choice))
- } else {
- Buffered(BufferWriter::stderr(choice))
- }
+impl Write for Buffy {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.buffer.write(buf)
}
- fn writable(&mut self) -> WritableDst<'_> {
- match *self {
- Destination::Terminal(ref mut t) => WritableDst::Terminal(t),
- Destination::Buffered(ref mut t) => {
- let buf = t.buffer();
- WritableDst::Buffered(t, buf)
- }
- Destination::Raw(ref mut t, false) => WritableDst::Raw(t),
- Destination::Raw(ref mut t, true) => WritableDst::ColoredRaw(Ansi::new(t)),
+ fn flush(&mut self) -> io::Result<()> {
+ self.buffer_writer.print(&self.buffer)?;
+ self.buffer.clear();
+ Ok(())
+ }
+}
+
+impl Drop for Buffy {
+ fn drop(&mut self) {
+ if !self.buffer.is_empty() {
+ self.flush().unwrap();
+ panic!("buffers need to be flushed in order to print their contents");
}
}
+}
+impl WriteColor for Buffy {
fn supports_color(&self) -> bool {
- match *self {
- Self::Terminal(ref stream) => stream.supports_color(),
- Self::Buffered(ref buffer) => buffer.buffer().supports_color(),
- Self::Raw(_, supports_color) => supports_color,
- }
+ self.buffer.supports_color()
+ }
+
+ fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+ self.buffer.set_color(spec)
+ }
+
+ fn reset(&mut self) -> io::Result<()> {
+ self.buffer.reset()
+ }
+}
+
+fn from_stderr(color: ColorConfig) -> Destination {
+ let choice = color.to_color_choice();
+ // On Windows we'll be performing global synchronization on the entire
+ // system for emitting rustc errors, so there's no need to buffer
+ // anything.
+ //
+ // On non-Windows we rely on the atomicity of `write` to ensure errors
+ // don't get all jumbled up.
+ if cfg!(windows) {
+ Box::new(StandardStream::stderr(choice))
+ } else {
+ let buffer_writer = BufferWriter::stderr(choice);
+ let buffer = buffer_writer.buffer();
+ Box::new(Buffy { buffer_writer, buffer })
}
}
-impl<'a> WritableDst<'a> {
- fn apply_style(&mut self, lvl: Level, style: Style) -> io::Result<()> {
+impl Style {
+ fn color_spec(&self, lvl: Level) -> ColorSpec {
let mut spec = ColorSpec::new();
- match style {
+ match self {
Style::Addition => {
spec.set_fg(Some(Color::Green)).set_intense(true);
}
@@ -2746,53 +2705,7 @@ impl<'a> WritableDst<'a> {
spec.set_bold(true);
}
}
- self.set_color(&spec)
- }
-
- fn set_color(&mut self, color: &ColorSpec) -> io::Result<()> {
- match *self {
- WritableDst::Terminal(ref mut t) => t.set_color(color),
- WritableDst::Buffered(_, ref mut t) => t.set_color(color),
- WritableDst::ColoredRaw(ref mut t) => t.set_color(color),
- WritableDst::Raw(_) => Ok(()),
- }
- }
-
- fn reset(&mut self) -> io::Result<()> {
- match *self {
- WritableDst::Terminal(ref mut t) => t.reset(),
- WritableDst::Buffered(_, ref mut t) => t.reset(),
- WritableDst::ColoredRaw(ref mut t) => t.reset(),
- WritableDst::Raw(_) => Ok(()),
- }
- }
-}
-
-impl<'a> Write for WritableDst<'a> {
- fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
- match *self {
- WritableDst::Terminal(ref mut t) => t.write(bytes),
- WritableDst::Buffered(_, ref mut buf) => buf.write(bytes),
- WritableDst::Raw(ref mut w) => w.write(bytes),
- WritableDst::ColoredRaw(ref mut t) => t.write(bytes),
- }
- }
-
- fn flush(&mut self) -> io::Result<()> {
- match *self {
- WritableDst::Terminal(ref mut t) => t.flush(),
- WritableDst::Buffered(_, ref mut buf) => buf.flush(),
- WritableDst::Raw(ref mut w) => w.flush(),
- WritableDst::ColoredRaw(ref mut w) => w.flush(),
- }
- }
-}
-
-impl<'a> Drop for WritableDst<'a> {
- fn drop(&mut self) {
- if let WritableDst::Buffered(ref mut dst, ref mut buf) = self {
- drop(dst.print(buf));
- }
+ spec
}
}
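
The human-readable emitter is now configured through derive_setters-generated chaining instead of the old ten-argument constructors. Below is a minimal usage sketch, not taken from the patch itself; `fallback_bundle` and `source_map` are assumed to already be in scope, and the setter names simply mirror the non-skipped fields of `EmitterWriter` shown above:

    let emitter = EmitterWriter::stderr(ColorConfig::Auto, fallback_bundle)
        .sm(Some(source_map))
        .short_message(false)
        .diagnostic_width(Some(100));

The `json.rs` hunk below switches to this same chaining style.
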
diff --git a/compiler/rustc_errors/src/json.rs b/compiler/rustc_errors/src/json.rs
index f32d6b96b..b8f58e305 100644
--- a/compiler/rustc_errors/src/json.rs
+++ b/compiler/rustc_errors/src/json.rs
@@ -10,6 +10,7 @@
// FIXME: spec the JSON output properly.
use rustc_span::source_map::{FilePathMapping, SourceMap};
+use termcolor::{ColorSpec, WriteColor};
use crate::emitter::{Emitter, HumanReadableErrorType};
use crate::registry::Registry;
@@ -159,7 +160,7 @@ impl Emitter for JsonEmitter {
}
.and_then(|_| self.dst.flush());
if let Err(e) = result {
- panic!("failed to print diagnostics: {:?}", e);
+ panic!("failed to print diagnostics: {e:?}");
}
}
@@ -172,7 +173,7 @@ impl Emitter for JsonEmitter {
}
.and_then(|_| self.dst.flush());
if let Err(e) = result {
- panic!("failed to print notification: {:?}", e);
+ panic!("failed to print notification: {e:?}");
}
}
@@ -194,7 +195,7 @@ impl Emitter for JsonEmitter {
}
.and_then(|_| self.dst.flush());
if let Err(e) = result {
- panic!("failed to print future breakage report: {:?}", e);
+ panic!("failed to print future breakage report: {e:?}");
}
}
@@ -208,7 +209,7 @@ impl Emitter for JsonEmitter {
}
.and_then(|_| self.dst.flush());
if let Err(e) = result {
- panic!("failed to print unused externs: {:?}", e);
+ panic!("failed to print unused externs: {e:?}");
}
}
@@ -356,20 +357,29 @@ impl Diagnostic {
self.0.lock().unwrap().flush()
}
}
+ impl WriteColor for BufWriter {
+ fn supports_color(&self) -> bool {
+ false
+ }
+
+ fn set_color(&mut self, _spec: &ColorSpec) -> io::Result<()> {
+ Ok(())
+ }
+
+ fn reset(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+ }
let buf = BufWriter::default();
let output = buf.clone();
je.json_rendered
- .new_emitter(
- Box::new(buf),
- Some(je.sm.clone()),
- je.fluent_bundle.clone(),
- je.fallback_bundle.clone(),
- false,
- je.diagnostic_width,
- je.macro_backtrace,
- je.track_diagnostics,
- je.terminal_url,
- )
+ .new_emitter(Box::new(buf), je.fallback_bundle.clone())
+ .sm(Some(je.sm.clone()))
+ .fluent_bundle(je.fluent_bundle.clone())
+ .diagnostic_width(je.diagnostic_width)
+ .macro_backtrace(je.macro_backtrace)
+ .track_diagnostics(je.track_diagnostics)
+ .terminal_url(je.terminal_url)
.ui_testing(je.ui_testing)
.emit_diagnostic(diag);
let output = Arc::try_unwrap(output.0).unwrap().into_inner().unwrap();
diff --git a/compiler/rustc_errors/src/json/tests.rs b/compiler/rustc_errors/src/json/tests.rs
index 671dc449e..1f9a2981e 100644
--- a/compiler/rustc_errors/src/json/tests.rs
+++ b/compiler/rustc_errors/src/json/tests.rs
@@ -64,7 +64,7 @@ fn test_positions(code: &str, span: (u32, u32), expected_output: SpanTestData) {
);
let span = Span::with_root_ctxt(BytePos(span.0), BytePos(span.1));
- let handler = Handler::with_emitter(true, None, Box::new(je));
+ let handler = Handler::with_emitter(Box::new(je));
handler.span_err(span, "foo");
let bytes = output.lock().unwrap();
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index b9db25103..34518b537 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -15,6 +15,7 @@
#![feature(box_patterns)]
#![feature(error_reporter)]
#![allow(incomplete_features)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate rustc_macros;
@@ -22,6 +23,8 @@ extern crate rustc_macros;
#[macro_use]
extern crate tracing;
+extern crate self as rustc_errors;
+
pub use emitter::ColorConfig;
use rustc_lint_defs::LintExpectationId;
@@ -47,9 +50,10 @@ use std::borrow::Cow;
use std::error::Report;
use std::fmt;
use std::hash::Hash;
+use std::io::Write;
use std::num::NonZeroUsize;
use std::panic;
-use std::path::Path;
+use std::path::{Path, PathBuf};
use termcolor::{Color, ColorSpec};
@@ -376,13 +380,16 @@ pub struct ExplicitBug;
/// rather than a failed assertion, etc.
pub struct DelayedBugPanic;
+use crate::diagnostic_impls::{DelayedAtWithNewline, DelayedAtWithoutNewline};
pub use diagnostic::{
AddToDiagnostic, DecorateLint, Diagnostic, DiagnosticArg, DiagnosticArgValue, DiagnosticId,
DiagnosticStyledString, IntoDiagnosticArg, SubDiagnostic,
};
pub use diagnostic_builder::{DiagnosticBuilder, EmissionGuarantee, Noted};
pub use diagnostic_impls::{
- DiagnosticArgFromDisplay, DiagnosticSymbolList, LabelKind, SingleLabelManySpans,
+ DiagnosticArgFromDisplay, DiagnosticSymbolList, ExpectedLifetimeParameter,
+ IndicateAnonymousLifetime, InvalidFlushedDelayedDiagnosticLevel, LabelKind,
+ SingleLabelManySpans,
};
use std::backtrace::{Backtrace, BacktraceStatus};
@@ -390,7 +397,6 @@ use std::backtrace::{Backtrace, BacktraceStatus};
/// Certain errors (fatal, bug, unimpl) may cause immediate exit,
/// others log errors for later reporting.
pub struct Handler {
- flags: HandlerFlags,
inner: Lock<HandlerInner>,
}
@@ -446,11 +452,11 @@ struct HandlerInner {
/// have been converted.
check_unstable_expect_diagnostics: bool,
- /// Expected [`Diagnostic`][diagnostic::Diagnostic]s store a [`LintExpectationId`] as part of
+ /// Expected [`Diagnostic`][struct@diagnostic::Diagnostic]s store a [`LintExpectationId`] as part of
/// the lint level. [`LintExpectationId`]s created early during the compilation
/// (before `HirId`s have been defined) are not stable and can therefore not be
/// stored on disk. This buffer stores these diagnostics until the ID has been
- /// replaced by a stable [`LintExpectationId`]. The [`Diagnostic`][diagnostic::Diagnostic]s are the
+ /// replaced by a stable [`LintExpectationId`]. The [`Diagnostic`][struct@diagnostic::Diagnostic]s are then
/// submitted for storage and added to the list of fulfilled expectations.
unstable_expect_diagnostics: Vec<Diagnostic>,
@@ -461,6 +467,10 @@ struct HandlerInner {
///
/// [RFC-2383]: https://rust-lang.github.io/rfcs/2383-lint-reasons.html
fulfilled_expectations: FxHashSet<LintExpectationId>,
+
+ /// The file where the ICE information is stored. This allows delayed_span_bug backtraces to be
+ /// stored alongside the main panic backtrace.
+ ice_file: Option<PathBuf>,
}
/// A key denoting where from a diagnostic was stashed.
@@ -544,63 +554,36 @@ impl Drop for HandlerInner {
impl Handler {
pub fn with_tty_emitter(
- color_config: ColorConfig,
- can_emit_warnings: bool,
- treat_err_as_bug: Option<NonZeroUsize>,
sm: Option<Lrc<SourceMap>>,
- fluent_bundle: Option<Lrc<FluentBundle>>,
fallback_bundle: LazyFallbackBundle,
) -> Self {
- Self::with_tty_emitter_and_flags(
- color_config,
- sm,
- fluent_bundle,
- fallback_bundle,
- HandlerFlags { can_emit_warnings, treat_err_as_bug, ..Default::default() },
- )
+ let emitter = Box::new(EmitterWriter::stderr(ColorConfig::Auto, fallback_bundle).sm(sm));
+ Self::with_emitter(emitter)
+ }
+ pub fn disable_warnings(mut self) -> Self {
+ self.inner.get_mut().flags.can_emit_warnings = false;
+ self
}
- pub fn with_tty_emitter_and_flags(
- color_config: ColorConfig,
- sm: Option<Lrc<SourceMap>>,
- fluent_bundle: Option<Lrc<FluentBundle>>,
- fallback_bundle: LazyFallbackBundle,
- flags: HandlerFlags,
- ) -> Self {
- let emitter = Box::new(EmitterWriter::stderr(
- color_config,
- sm,
- fluent_bundle,
- fallback_bundle,
- false,
- false,
- None,
- flags.macro_backtrace,
- flags.track_diagnostics,
- TerminalUrl::No,
- ));
- Self::with_emitter_and_flags(emitter, flags)
- }
-
- pub fn with_emitter(
- can_emit_warnings: bool,
- treat_err_as_bug: Option<NonZeroUsize>,
- emitter: Box<dyn Emitter + sync::Send>,
- ) -> Self {
- Handler::with_emitter_and_flags(
- emitter,
- HandlerFlags { can_emit_warnings, treat_err_as_bug, ..Default::default() },
- )
+ pub fn treat_err_as_bug(mut self, treat_err_as_bug: NonZeroUsize) -> Self {
+ self.inner.get_mut().flags.treat_err_as_bug = Some(treat_err_as_bug);
+ self
}
- pub fn with_emitter_and_flags(
- emitter: Box<dyn Emitter + sync::Send>,
- flags: HandlerFlags,
- ) -> Self {
+ pub fn with_flags(mut self, flags: HandlerFlags) -> Self {
+ self.inner.get_mut().flags = flags;
+ self
+ }
+
+ pub fn with_ice_file(mut self, ice_file: PathBuf) -> Self {
+ self.inner.get_mut().ice_file = Some(ice_file);
+ self
+ }
+
+ pub fn with_emitter(emitter: Box<dyn Emitter + sync::Send>) -> Self {
Self {
- flags,
inner: Lock::new(HandlerInner {
- flags,
+ flags: HandlerFlags { can_emit_warnings: true, ..Default::default() },
lint_err_count: 0,
err_count: 0,
warn_count: 0,
@@ -618,6 +601,7 @@ impl Handler {
check_unstable_expect_diagnostics: false,
unstable_expect_diagnostics: Vec::new(),
fulfilled_expectations: Default::default(),
+ ice_file: None,
}),
}
}
@@ -645,7 +629,7 @@ impl Handler {
// This is here to not allow mutation of flags;
// as of this writing it's only used in tests in librustc_middle.
pub fn can_emit_warnings(&self) -> bool {
- self.flags.can_emit_warnings
+ self.inner.lock().flags.can_emit_warnings
}
/// Resets the diagnostic error count as well as the cached emitted diagnostics.
@@ -991,7 +975,7 @@ impl Handler {
self.emit_diag_at_span(Diagnostic::new_with_code(Warning(None), Some(code), msg), span);
}
- pub fn span_bug(&self, span: impl Into<MultiSpan>, msg: impl Into<DiagnosticMessage>) -> ! {
+ pub fn span_bug(&self, span: impl Into<MultiSpan>, msg: impl Into<String>) -> ! {
self.inner.borrow_mut().span_bug(span, msg)
}
@@ -1000,7 +984,7 @@ impl Handler {
pub fn delay_span_bug(
&self,
span: impl Into<MultiSpan>,
- msg: impl Into<DiagnosticMessage>,
+ msg: impl Into<String>,
) -> ErrorGuaranteed {
self.inner.borrow_mut().delay_span_bug(span, msg)
}
@@ -1473,7 +1457,7 @@ impl HandlerInner {
let _ = self.fatal(errors);
}
(_, _) => {
- let _ = self.fatal(format!("{}; {}", &errors, &warnings));
+ let _ = self.fatal(format!("{errors}; {warnings}"));
}
}
@@ -1584,8 +1568,8 @@ impl HandlerInner {
}
#[track_caller]
- fn span_bug(&mut self, sp: impl Into<MultiSpan>, msg: impl Into<DiagnosticMessage>) -> ! {
- self.emit_diag_at_span(Diagnostic::new(Bug, msg), sp);
+ fn span_bug(&mut self, sp: impl Into<MultiSpan>, msg: impl Into<String>) -> ! {
+ self.emit_diag_at_span(Diagnostic::new(Bug, msg.into()), sp);
panic::panic_any(ExplicitBug);
}
@@ -1598,7 +1582,7 @@ impl HandlerInner {
fn delay_span_bug(
&mut self,
sp: impl Into<MultiSpan>,
- msg: impl Into<DiagnosticMessage>,
+ msg: impl Into<String>,
) -> ErrorGuaranteed {
// This is technically `self.treat_err_as_bug()` but `delay_span_bug` is called before
// incrementing `err_count` by one, so we need to +1 the comparing.
@@ -1607,9 +1591,9 @@ impl HandlerInner {
self.err_count() + self.lint_err_count + self.delayed_bug_count() + 1 >= c.get()
}) {
// FIXME: don't abort here if report_delayed_bugs is off
- self.span_bug(sp, msg);
+ self.span_bug(sp, msg.into());
}
- let mut diagnostic = Diagnostic::new(Level::DelayedBug, msg);
+ let mut diagnostic = Diagnostic::new(Level::DelayedBug, msg.into());
diagnostic.set_span(sp.into());
self.emit_diagnostic(&mut diagnostic).unwrap()
}
@@ -1657,8 +1641,21 @@ impl HandlerInner {
explanation: impl Into<DiagnosticMessage> + Copy,
) {
let mut no_bugs = true;
+ // If backtraces are enabled, also print the query stack
+ let backtrace = std::env::var_os("RUST_BACKTRACE").map_or(true, |x| &x != "0");
for bug in bugs {
- let mut bug = bug.decorate();
+ if let Some(file) = self.ice_file.as_ref()
+ && let Ok(mut out) = std::fs::File::options().create(true).append(true).open(file)
+ {
+ let _ = write!(
+ &mut out,
+ "delayed span bug: {}\n{}\n",
+ bug.inner.styled_message().iter().filter_map(|(msg, _)| msg.as_str()).collect::<String>(),
+ &bug.note
+ );
+ }
+ let mut bug =
+ if backtrace || self.ice_file.is_none() { bug.decorate() } else { bug.inner };
if no_bugs {
// Put the overall explanation before the `DelayedBug`s, to
@@ -1671,11 +1668,10 @@ impl HandlerInner {
if bug.level != Level::DelayedBug {
// NOTE(eddyb) not panicking here because we're already producing
// an ICE, and the more information the merrier.
- bug.note(format!(
- "`flushed_delayed` got diagnostic with level {:?}, \
- instead of the expected `DelayedBug`",
- bug.level,
- ));
+ bug.subdiagnostic(InvalidFlushedDelayedDiagnosticLevel {
+ span: bug.span.primary_span().unwrap(),
+ level: bug.level,
+ });
}
bug.level = Level::Bug;
@@ -1714,13 +1710,11 @@ impl HandlerInner {
(count, delayed_count, as_bug) => {
if delayed_count > 0 {
panic!(
- "aborting after {} errors and {} delayed bugs due to `-Z treat-err-as-bug={}`",
- count, delayed_count, as_bug,
+ "aborting after {count} errors and {delayed_count} delayed bugs due to `-Z treat-err-as-bug={as_bug}`",
)
} else {
panic!(
- "aborting after {} errors due to `-Z treat-err-as-bug={}`",
- count, as_bug,
+ "aborting after {count} errors due to `-Z treat-err-as-bug={as_bug}`",
)
}
}
@@ -1742,12 +1736,22 @@ impl DelayedDiagnostic {
fn decorate(mut self) -> Diagnostic {
match self.note.status() {
BacktraceStatus::Captured => {
- self.inner.note(format!("delayed at {}\n{}", self.inner.emitted_at, self.note));
+ let inner = &self.inner;
+ self.inner.subdiagnostic(DelayedAtWithNewline {
+ span: inner.span.primary_span().unwrap(),
+ emitted_at: inner.emitted_at.clone(),
+ note: self.note,
+ });
}
// Avoid the needless newline when no backtrace has been captured,
// the display impl should just be a single line.
_ => {
- self.inner.note(format!("delayed at {} - {}", self.inner.emitted_at, self.note));
+ let inner = &self.inner;
+ self.inner.subdiagnostic(DelayedAtWithoutNewline {
+ span: inner.span.primary_span().unwrap(),
+ emitted_at: inner.emitted_at.clone(),
+ note: self.note,
+ });
}
}
@@ -1839,20 +1843,36 @@ pub fn add_elided_lifetime_in_path_suggestion(
incl_angl_brckt: bool,
insertion_span: Span,
) {
- diag.span_label(path_span, format!("expected lifetime parameter{}", pluralize!(n)));
+ diag.subdiagnostic(ExpectedLifetimeParameter { span: path_span, count: n });
if !source_map.is_span_accessible(insertion_span) {
// Do not try to suggest anything if generated by a proc-macro.
return;
}
let anon_lts = vec!["'_"; n].join(", ");
let suggestion =
- if incl_angl_brckt { format!("<{}>", anon_lts) } else { format!("{}, ", anon_lts) };
- diag.span_suggestion_verbose(
- insertion_span.shrink_to_hi(),
- format!("indicate the anonymous lifetime{}", pluralize!(n)),
+ if incl_angl_brckt { format!("<{anon_lts}>") } else { format!("{anon_lts}, ") };
+
+ diag.subdiagnostic(IndicateAnonymousLifetime {
+ span: insertion_span.shrink_to_hi(),
+ count: n,
suggestion,
- Applicability::MachineApplicable,
- );
+ });
+}
+
+pub fn report_ambiguity_error<'a, G: EmissionGuarantee>(
+ db: &mut DiagnosticBuilder<'a, G>,
+ ambiguity: rustc_lint_defs::AmbiguityErrorDiag,
+) {
+ db.span_label(ambiguity.label_span, ambiguity.label_msg);
+ db.note(ambiguity.note_msg);
+ db.span_note(ambiguity.b1_span, ambiguity.b1_note_msg);
+ for help_msg in ambiguity.b1_help_msgs {
+ db.help(help_msg);
+ }
+ db.span_note(ambiguity.b2_span, ambiguity.b2_note_msg);
+ for help_msg in ambiguity.b2_help_msgs {
+ db.help(help_msg);
+ }
}
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
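
`Handler` construction follows the same pattern: the boolean- and flag-taking constructors become chainable configuration methods. A minimal usage sketch under the new API, assuming `source_map`, `fallback_bundle`, and `ice_path` are already available from session setup; the method names come straight from the hunks above:

    let handler = Handler::with_tty_emitter(Some(source_map), fallback_bundle)
        .disable_warnings()
        .with_ice_file(ice_path);
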
diff --git a/compiler/rustc_errors/src/markdown/parse.rs b/compiler/rustc_errors/src/markdown/parse.rs
index 362a451fd..d3a08da62 100644
--- a/compiler/rustc_errors/src/markdown/parse.rs
+++ b/compiler/rustc_errors/src/markdown/parse.rs
@@ -272,7 +272,7 @@ fn parse_ordered_li(buf: &[u8]) -> Parsed<'_> {
fn get_indented_section(buf: &[u8]) -> (&[u8], &[u8]) {
let mut end = buf.len();
for (idx, window) in buf.windows(2).enumerate() {
- let &[ch, next_ch] = window else {unreachable!("always 2 elements")};
+ let &[ch, next_ch] = window else { unreachable!("always 2 elements") };
if idx >= buf.len().saturating_sub(2) && next_ch == b'\n' {
// End of stream
end = buf.len().saturating_sub(1);
diff --git a/compiler/rustc_errors/src/markdown/term.rs b/compiler/rustc_errors/src/markdown/term.rs
index e45ba6d2c..88c3c8b9f 100644
--- a/compiler/rustc_errors/src/markdown/term.rs
+++ b/compiler/rustc_errors/src/markdown/term.rs
@@ -149,7 +149,7 @@ fn write_wrapping<B: io::Write>(
let Some((end_idx, _ch)) = iter.nth(ch_count) else {
// Write entire line
buf.write_all(to_write.as_bytes())?;
- cur.set(cur.get()+to_write.chars().count());
+ cur.set(cur.get() + to_write.chars().count());
break;
};
diff --git a/compiler/rustc_errors/src/markdown/tests/term.rs b/compiler/rustc_errors/src/markdown/tests/term.rs
index 3b31c6d62..6f68fb25a 100644
--- a/compiler/rustc_errors/src/markdown/tests/term.rs
+++ b/compiler/rustc_errors/src/markdown/tests/term.rs
@@ -63,7 +63,7 @@ fn test_wrapping_write() {
#[test]
fn test_output() {
// Capture `--bless` when run via ./x
- let bless = std::env::var("RUSTC_BLESS").unwrap_or_default() == "1";
+ let bless = std::env::var_os("RUSTC_BLESS").is_some_and(|v| v != "0");
let ast = MdStream::parse_str(INPUT);
let bufwtr = BufferWriter::stderr(ColorChoice::Always);
let mut buffer = bufwtr.buffer();
diff --git a/compiler/rustc_expand/Cargo.toml b/compiler/rustc_expand/Cargo.toml
index 2dae0e3f5..02da5b5dc 100644
--- a/compiler/rustc_expand/Cargo.toml
+++ b/compiler/rustc_expand/Cargo.toml
@@ -27,3 +27,4 @@ rustc_span = { path = "../rustc_span" }
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
thin-vec = "0.2.12"
tracing = "0.1"
+termcolor = "1.2"
diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs
index 8a251ea29..c4d2a374f 100644
--- a/compiler/rustc_expand/src/base.rs
+++ b/compiler/rustc_expand/src/base.rs
@@ -18,6 +18,7 @@ use rustc_errors::{
Applicability, DiagnosticBuilder, DiagnosticMessage, ErrorGuaranteed, IntoDiagnostic,
MultiSpan, PResult,
};
+use rustc_feature::Features;
use rustc_lint_defs::builtin::PROC_MACRO_BACK_COMPAT;
use rustc_lint_defs::{BufferedEarlyLint, BuiltinLintDiagnostics, RegisteredTools};
use rustc_parse::{self, parser, MACRO_ARGUMENTS};
@@ -767,6 +768,7 @@ impl SyntaxExtension {
/// and other properties converted from attributes.
pub fn new(
sess: &Session,
+ features: &Features,
kind: SyntaxExtensionKind,
span: Span,
helper_attrs: Vec<Symbol>,
@@ -816,7 +818,7 @@ impl SyntaxExtension {
allow_internal_unstable: (!allow_internal_unstable.is_empty())
.then(|| allow_internal_unstable.into()),
stability: stability.map(|(s, _)| s),
- deprecation: attr::find_deprecation(&sess, attrs).map(|(d, _)| d),
+ deprecation: attr::find_deprecation(&sess, features, attrs).map(|(d, _)| d),
helper_attrs,
edition,
builtin_name,
@@ -957,6 +959,7 @@ pub trait LintStoreExpand {
fn pre_expansion_lint(
&self,
sess: &Session,
+ features: &Features,
registered_tools: &RegisteredTools,
node_id: NodeId,
attrs: &[Attribute],
@@ -1147,7 +1150,7 @@ impl<'a> ExtCtxt<'a> {
pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: impl Into<DiagnosticMessage>) {
self.sess.parse_sess.span_diagnostic.span_warn(sp, msg);
}
- pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: impl Into<DiagnosticMessage>) -> ! {
+ pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: impl Into<String>) -> ! {
self.sess.parse_sess.span_diagnostic.span_bug(sp, msg);
}
pub fn trace_macros_diag(&mut self) {
@@ -1366,7 +1369,7 @@ pub fn parse_macro_name_and_helper_attrs(
return None;
}
let Some(trait_attr) = list[0].meta_item() else {
- diag.emit_err(errors::NotAMetaItem {span: list[0].span()});
+ diag.emit_err(errors::NotAMetaItem { span: list[0].span() });
return None;
};
let trait_ident = match trait_attr.ident() {
diff --git a/compiler/rustc_expand/src/build.rs b/compiler/rustc_expand/src/build.rs
index 264f30fb1..7de469944 100644
--- a/compiler/rustc_expand/src/build.rs
+++ b/compiler/rustc_expand/src/build.rs
@@ -643,7 +643,16 @@ impl<'a> ExtCtxt<'a> {
span,
name,
AttrVec::new(),
- ast::ItemKind::Const(ast::ConstItem { defaultness, ty, expr: Some(expr) }.into()),
+ ast::ItemKind::Const(
+ ast::ConstItem {
+ defaultness,
+ // FIXME(generic_const_items): Pass the generics as a parameter.
+ generics: ast::Generics::default(),
+ ty,
+ expr: Some(expr),
+ }
+ .into(),
+ ),
)
}
diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs
index 3e43eae00..8658cea13 100644
--- a/compiler/rustc_expand/src/config.rs
+++ b/compiler/rustc_expand/src/config.rs
@@ -313,9 +313,10 @@ impl<'a> StripUnconfigured<'a> {
/// the attribute is incorrect.
pub(crate) fn expand_cfg_attr(&self, attr: &Attribute, recursive: bool) -> Vec<Attribute> {
let Some((cfg_predicate, expanded_attrs)) =
- rustc_parse::parse_cfg_attr(attr, &self.sess.parse_sess) else {
- return vec![];
- };
+ rustc_parse::parse_cfg_attr(attr, &self.sess.parse_sess)
+ else {
+ return vec![];
+ };
// Lint on zero attributes in source.
if expanded_attrs.is_empty() {
@@ -364,17 +365,21 @@ impl<'a> StripUnconfigured<'a> {
// Use the `#` in `#[cfg_attr(pred, attr)]` as the `#` token
// for `attr` when we expand it to `#[attr]`
- let mut orig_trees = orig_tokens.into_trees();
- let TokenTree::Token(pound_token @ Token { kind: TokenKind::Pound, .. }, _) = orig_trees.next().unwrap() else {
- panic!("Bad tokens for attribute {:?}", attr);
+ let mut orig_trees = orig_tokens.trees();
+ let TokenTree::Token(pound_token @ Token { kind: TokenKind::Pound, .. }, _) =
+ orig_trees.next().unwrap().clone()
+ else {
+ panic!("Bad tokens for attribute {attr:?}");
};
let pound_span = pound_token.span;
let mut trees = vec![AttrTokenTree::Token(pound_token, Spacing::Alone)];
if attr.style == AttrStyle::Inner {
// For inner attributes, we do the same thing for the `!` in `#![some_attr]`
- let TokenTree::Token(bang_token @ Token { kind: TokenKind::Not, .. }, _) = orig_trees.next().unwrap() else {
- panic!("Bad tokens for attribute {:?}", attr);
+ let TokenTree::Token(bang_token @ Token { kind: TokenKind::Not, .. }, _) =
+ orig_trees.next().unwrap().clone()
+ else {
+ panic!("Bad tokens for attribute {attr:?}");
};
trees.push(AttrTokenTree::Token(bang_token, Spacing::Alone));
}
@@ -385,7 +390,7 @@ impl<'a> StripUnconfigured<'a> {
Delimiter::Bracket,
item.tokens
.as_ref()
- .unwrap_or_else(|| panic!("Missing tokens for {:?}", item))
+ .unwrap_or_else(|| panic!("Missing tokens for {item:?}"))
.to_attr_token_stream(),
);
trees.push(bracket_group);
diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs
index 9850723a8..34d16bf00 100644
--- a/compiler/rustc_expand/src/expand.rs
+++ b/compiler/rustc_expand/src/expand.rs
@@ -651,7 +651,8 @@ impl<'a, 'b> MacroExpander<'a, 'b> {
ExpandResult::Ready(match invoc.kind {
InvocationKind::Bang { mac, .. } => match ext {
SyntaxExtensionKind::Bang(expander) => {
- let Ok(tok_result) = expander.expand(self.cx, span, mac.args.tokens.clone()) else {
+ let Ok(tok_result) = expander.expand(self.cx, span, mac.args.tokens.clone())
+ else {
return ExpandResult::Ready(fragment_kind.dummy(span));
};
self.parse_ast_fragment(tok_result, fragment_kind, &mac.path, span)
@@ -704,7 +705,8 @@ impl<'a, 'b> MacroExpander<'a, 'b> {
self.cx.emit_err(UnsupportedKeyValue { span });
}
let inner_tokens = attr_item.args.inner_tokens();
- let Ok(tok_result) = expander.expand(self.cx, span, inner_tokens, tokens) else {
+ let Ok(tok_result) = expander.expand(self.cx, span, inner_tokens, tokens)
+ else {
return ExpandResult::Ready(fragment_kind.dummy(span));
};
self.parse_ast_fragment(tok_result, fragment_kind, &attr_item.path, span)
@@ -794,14 +796,14 @@ impl<'a, 'b> MacroExpander<'a, 'b> {
| Annotatable::FieldDef(..)
| Annotatable::Variant(..) => panic!("unexpected annotatable"),
};
- if self.cx.ecfg.proc_macro_hygiene() {
+ if self.cx.ecfg.features.proc_macro_hygiene {
return;
}
feature_err(
&self.cx.sess.parse_sess,
sym::proc_macro_hygiene,
span,
- format!("custom attributes cannot be applied to {}", kind),
+ format!("custom attributes cannot be applied to {kind}"),
)
.emit();
}
@@ -832,7 +834,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> {
}
}
- if !self.cx.ecfg.proc_macro_hygiene() {
+ if !self.cx.ecfg.features.proc_macro_hygiene {
annotatable
.visit_with(&mut GateProcMacroInput { parse_sess: &self.cx.sess.parse_sess });
}
@@ -1087,9 +1089,7 @@ impl InvocationCollectorNode for P<ast::Item> {
// Work around borrow checker not seeing through `P`'s deref.
let (ident, span, mut attrs) = (node.ident, node.span, mem::take(&mut node.attrs));
- let ItemKind::Mod(_, mod_kind) = &mut node.kind else {
- unreachable!()
- };
+ let ItemKind::Mod(_, mod_kind) = &mut node.kind else { unreachable!() };
let ecx = &mut collector.cx;
let (file_path, dir_path, dir_ownership) = match mod_kind {
@@ -1122,6 +1122,7 @@ impl InvocationCollectorNode for P<ast::Item> {
if let Some(lint_store) = ecx.lint_store {
lint_store.pre_expansion_lint(
ecx.sess,
+ ecx.ecfg.features,
ecx.resolver.registered_tools(),
ecx.current_expansion.lint_node_id,
&attrs,
@@ -1580,7 +1581,7 @@ impl<'a, 'b> InvocationCollector<'a, 'b> {
fn cfg(&self) -> StripUnconfigured<'_> {
StripUnconfigured {
sess: &self.cx.sess,
- features: self.cx.ecfg.features,
+ features: Some(self.cx.ecfg.features),
config_tokens: false,
lint_node_id: self.cx.current_expansion.lint_node_id,
}
@@ -1676,7 +1677,7 @@ impl<'a, 'b> InvocationCollector<'a, 'b> {
// Detect use of feature-gated or invalid attributes on macro invocations
// since they will not be detected after macro expansion.
fn check_attributes(&self, attrs: &[ast::Attribute], call: &ast::MacCall) {
- let features = self.cx.ecfg.features.unwrap();
+ let features = self.cx.ecfg.features;
let mut attrs = attrs.iter().peekable();
let mut span: Option<Span> = None;
while let Some(attr) = attrs.next() {
@@ -1707,7 +1708,7 @@ impl<'a, 'b> InvocationCollector<'a, 'b> {
&UNUSED_ATTRIBUTES,
attr.span,
self.cx.current_expansion.lint_node_id,
- format!("unused attribute `{}`", attr_name),
+ format!("unused attribute `{attr_name}`"),
BuiltinLintDiagnostics::UnusedBuiltinAttribute {
attr_name,
macro_name: pprust::path_to_string(&call.path),
@@ -1976,7 +1977,7 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> {
pub struct ExpansionConfig<'feat> {
pub crate_name: String,
- pub features: Option<&'feat Features>,
+ pub features: &'feat Features,
pub recursion_limit: Limit,
pub trace_mac: bool,
/// If false, strip `#[test]` nodes
@@ -1987,11 +1988,11 @@ pub struct ExpansionConfig<'feat> {
pub proc_macro_backtrace: bool,
}
-impl<'feat> ExpansionConfig<'feat> {
- pub fn default(crate_name: String) -> ExpansionConfig<'static> {
+impl ExpansionConfig<'_> {
+ pub fn default(crate_name: String, features: &Features) -> ExpansionConfig<'_> {
ExpansionConfig {
crate_name,
- features: None,
+ features,
recursion_limit: Limit::new(1024),
trace_mac: false,
should_test: false,
@@ -1999,8 +2000,4 @@ impl<'feat> ExpansionConfig<'feat> {
proc_macro_backtrace: false,
}
}
-
- fn proc_macro_hygiene(&self) -> bool {
- self.features.is_some_and(|features| features.proc_macro_hygiene)
- }
}
diff --git a/compiler/rustc_expand/src/lib.rs b/compiler/rustc_expand/src/lib.rs
index 83a5043b0..c4a9b2ace 100644
--- a/compiler/rustc_expand/src/lib.rs
+++ b/compiler/rustc_expand/src/lib.rs
@@ -11,6 +11,7 @@
#![feature(try_blocks)]
#![recursion_limit = "256"]
#![deny(rustc::untranslatable_diagnostic)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate rustc_macros;
diff --git a/compiler/rustc_expand/src/mbe/diagnostics.rs b/compiler/rustc_expand/src/mbe/diagnostics.rs
index 3593bed2d..e06037564 100644
--- a/compiler/rustc_expand/src/mbe/diagnostics.rs
+++ b/compiler/rustc_expand/src/mbe/diagnostics.rs
@@ -42,7 +42,8 @@ pub(super) fn failed_to_match_macro<'cx>(
return result;
}
- let Some(BestFailure { token, msg: label, remaining_matcher, .. }) = tracker.best_failure else {
+ let Some(BestFailure { token, msg: label, remaining_matcher, .. }) = tracker.best_failure
+ else {
return DummyResult::any(sp);
};
@@ -256,7 +257,7 @@ pub(super) fn emit_frag_parse_err(
e.span_suggestion_verbose(
site_span,
"surround the macro invocation with `{}` to interpret the expansion as a statement",
- format!("{{ {}; }}", snippet),
+ format!("{{ {snippet}; }}"),
Applicability::MaybeIncorrect,
);
}
diff --git a/compiler/rustc_expand/src/mbe/macro_check.rs b/compiler/rustc_expand/src/mbe/macro_check.rs
index 34f998274..95f5bb2d2 100644
--- a/compiler/rustc_expand/src/mbe/macro_check.rs
+++ b/compiler/rustc_expand/src/mbe/macro_check.rs
@@ -593,7 +593,7 @@ fn check_ops_is_prefix(
return;
}
}
- buffer_lint(sess, span.into(), node_id, format!("unknown macro variable `{}`", name));
+ buffer_lint(sess, span.into(), node_id, format!("unknown macro variable `{name}`"));
}
/// Returns whether `binder_ops` is a prefix of `occurrence_ops`.
@@ -626,7 +626,7 @@ fn ops_is_prefix(
if i >= occurrence_ops.len() {
let mut span = MultiSpan::from_span(span);
span.push_span_label(binder.span, "expected repetition");
- let message = format!("variable '{}' is still repeating at this depth", name);
+ let message = format!("variable '{name}' is still repeating at this depth");
buffer_lint(sess, span, node_id, message);
return;
}
diff --git a/compiler/rustc_expand/src/mbe/macro_parser.rs b/compiler/rustc_expand/src/mbe/macro_parser.rs
index f0e67cfd5..7e85beaad 100644
--- a/compiler/rustc_expand/src/mbe/macro_parser.rs
+++ b/compiler/rustc_expand/src/mbe/macro_parser.rs
@@ -81,7 +81,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lrc;
use rustc_errors::ErrorGuaranteed;
use rustc_lint_defs::pluralize;
-use rustc_parse::parser::{NtOrTt, Parser};
+use rustc_parse::parser::{ParseNtResult, Parser};
use rustc_span::symbol::Ident;
use rustc_span::symbol::MacroRulesNormalizedIdent;
use rustc_span::Span;
@@ -156,7 +156,7 @@ impl Display for MatcherLoc {
MatcherLoc::MetaVarDecl { bind, kind, .. } => {
write!(f, "meta-variable `${bind}")?;
if let Some(kind) = kind {
- write!(f, ":{}", kind)?;
+ write!(f, ":{kind}")?;
}
write!(f, "`")?;
Ok(())
@@ -692,8 +692,8 @@ impl TtParser {
Ok(nt) => nt,
};
let m = match nt {
- NtOrTt::Nt(nt) => MatchedNonterminal(Lrc::new(nt)),
- NtOrTt::Tt(tt) => MatchedTokenTree(tt),
+ ParseNtResult::Nt(nt) => MatchedNonterminal(Lrc::new(nt)),
+ ParseNtResult::Tt(tt) => MatchedTokenTree(tt),
};
mp.push_match(next_metavar, seq_depth, m);
mp.idx += 1;
@@ -723,7 +723,7 @@ impl TtParser {
.iter()
.map(|mp| match &matcher[mp.idx] {
MatcherLoc::MetaVarDecl { bind, kind: Some(kind), .. } => {
- format!("{} ('{}')", kind, bind)
+ format!("{kind} ('{bind}')")
}
_ => unreachable!(),
})
@@ -736,8 +736,8 @@ impl TtParser {
"local ambiguity when calling macro `{}`: multiple parsing options: {}",
self.macro_name,
match self.next_mps.len() {
- 0 => format!("built-in NTs {}.", nts),
- n => format!("built-in NTs {} or {n} other option{s}.", nts, s = pluralize!(n)),
+ 0 => format!("built-in NTs {nts}."),
+ n => format!("built-in NTs {nts} or {n} other option{s}.", s = pluralize!(n)),
}
),
)
@@ -757,7 +757,7 @@ impl TtParser {
match ret_val.entry(MacroRulesNormalizedIdent::new(bind)) {
Vacant(spot) => spot.insert(res.next().unwrap()),
Occupied(..) => {
- return Error(span, format!("duplicated bind name: {}", bind));
+ return Error(span, format!("duplicated bind name: {bind}"));
}
};
} else {
diff --git a/compiler/rustc_expand/src/mbe/macro_rules.rs b/compiler/rustc_expand/src/mbe/macro_rules.rs
index 42cc0a6b1..a5959d68f 100644
--- a/compiler/rustc_expand/src/mbe/macro_rules.rs
+++ b/compiler/rustc_expand/src/mbe/macro_rules.rs
@@ -16,6 +16,7 @@ use rustc_ast_pretty::pprust;
use rustc_attr::{self as attr, TransparencyError};
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_errors::{Applicability, ErrorGuaranteed};
+use rustc_feature::Features;
use rustc_lint_defs::builtin::{
RUST_2021_INCOMPATIBLE_OR_PATTERNS, SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
};
@@ -249,7 +250,7 @@ fn expand_macro<'cx>(
trace_macros_note(&mut cx.expansions, sp, msg);
}
- let p = Parser::new(sess, tts, false, None);
+ let p = Parser::new(sess, tts, None);
if is_local {
cx.resolver.record_macro_rule_usage(node_id, i);
@@ -257,7 +258,7 @@ fn expand_macro<'cx>(
// Let the context choose how to interpret the result.
// Weird, but useful for X-macros.
- return Box::new(ParserAnyMacro {
+ Box::new(ParserAnyMacro {
parser: p,
// Pass along the original expansion site and the name of the macro
@@ -269,18 +270,17 @@ fn expand_macro<'cx>(
is_trailing_mac: cx.current_expansion.is_trailing_mac,
arm_span,
is_local,
- });
+ })
}
Err(CanRetry::No(_)) => {
debug!("Will not retry matching as an error was emitted already");
- return DummyResult::any(sp);
+ DummyResult::any(sp)
}
Err(CanRetry::Yes) => {
- // Retry and emit a better error below.
+ // Retry and emit a better error.
+ diagnostics::failed_to_match_macro(cx, sp, def_span, name, arg, lhses)
}
}
-
- diagnostics::failed_to_match_macro(cx, sp, def_span, name, arg, lhses)
}
pub(super) enum CanRetry {
@@ -376,6 +376,7 @@ pub(super) fn try_match_macro<'matcher, T: Tracker<'matcher>>(
/// Converts a macro item into a syntax extension.
pub fn compile_declarative_macro(
sess: &Session,
+ features: &Features,
def: &ast::Item,
edition: Edition,
) -> (SyntaxExtension, Vec<(usize, Span)>) {
@@ -383,6 +384,7 @@ pub fn compile_declarative_macro(
let mk_syn_ext = |expander| {
SyntaxExtension::new(
sess,
+ features,
SyntaxExtensionKind::LegacyBang(expander),
def.span,
Vec::new(),
@@ -447,7 +449,7 @@ pub fn compile_declarative_macro(
let create_parser = || {
let body = macro_def.body.tokens.clone();
- Parser::new(&sess.parse_sess, body, true, rustc_parse::MACRO_ARGUMENTS)
+ Parser::new(&sess.parse_sess, body, rustc_parse::MACRO_ARGUMENTS)
};
let parser = create_parser();
@@ -457,8 +459,8 @@ pub fn compile_declarative_macro(
match tt_parser.parse_tt(&mut Cow::Owned(parser), &argument_gram, &mut NoopTracker) {
Success(m) => m,
Failure(()) => {
- // The fast `NoopTracker` doesn't have any info on failure, so we need to retry it with another one
- // that gives us the information we need.
+ // The fast `NoopTracker` doesn't have any info on failure, so we need to retry it
+ // with another one that gives us the information we need.
// For this we need to reclone the macro body as the previous parser consumed it.
let retry_parser = create_parser();
@@ -500,11 +502,11 @@ pub fn compile_declarative_macro(
.map(|m| {
if let MatchedTokenTree(tt) = m {
let tt = mbe::quoted::parse(
- TokenStream::new(vec![tt.clone()]),
+ &TokenStream::new(vec![tt.clone()]),
true,
&sess.parse_sess,
def.id,
- sess.features_untracked(),
+ features,
edition,
)
.pop()
@@ -524,11 +526,11 @@ pub fn compile_declarative_macro(
.map(|m| {
if let MatchedTokenTree(tt) = m {
return mbe::quoted::parse(
- TokenStream::new(vec![tt.clone()]),
+ &TokenStream::new(vec![tt.clone()]),
false,
&sess.parse_sess,
def.id,
- sess.features_untracked(),
+ features,
edition,
)
.pop()
@@ -554,7 +556,7 @@ pub fn compile_declarative_macro(
let (transparency, transparency_error) = attr::find_transparency(&def.attrs, macro_rules);
match transparency_error {
Some(TransparencyError::UnknownTransparency(value, span)) => {
- diag.span_err(span, format!("unknown macro transparency: `{}`", value));
+ diag.span_err(span, format!("unknown macro transparency: `{value}`"));
}
Some(TransparencyError::MultipleTransparencyAttrs(old_span, new_span)) => {
diag.span_err(vec![old_span, new_span], "multiple macro transparency attributes");
@@ -1197,10 +1199,10 @@ fn check_matcher_core<'tt>(
may_be = may_be
),
);
- err.span_label(sp, format!("not allowed after `{}` fragments", kind));
+ err.span_label(sp, format!("not allowed after `{kind}` fragments"));
if kind == NonterminalKind::PatWithOr
- && sess.edition.rust_2021()
+ && sess.edition.at_least_rust_2021()
&& next_token.is_token(&BinOp(token::BinOpToken::Or))
{
let suggestion = quoted_tt_to_string(&TokenTree::MetaVarDecl(
@@ -1221,8 +1223,7 @@ fn check_matcher_core<'tt>(
&[] => {}
&[t] => {
err.note(format!(
- "only {} is allowed after `{}` fragments",
- t, kind,
+ "only {t} is allowed after `{kind}` fragments",
));
}
ts => {
@@ -1327,7 +1328,7 @@ fn is_in_follow(tok: &mbe::TokenTree, kind: NonterminalKind) -> IsInFollow {
_ => IsInFollow::No(TOKENS),
}
}
- NonterminalKind::PatWithOr { .. } => {
+ NonterminalKind::PatWithOr => {
const TOKENS: &[&str] = &["`=>`", "`,`", "`=`", "`if`", "`in`"];
match tok {
TokenTree::Token(token) => match token.kind {
@@ -1407,9 +1408,9 @@ fn is_in_follow(tok: &mbe::TokenTree, kind: NonterminalKind) -> IsInFollow {
fn quoted_tt_to_string(tt: &mbe::TokenTree) -> String {
match tt {
mbe::TokenTree::Token(token) => pprust::token_to_string(&token).into(),
- mbe::TokenTree::MetaVar(_, name) => format!("${}", name),
- mbe::TokenTree::MetaVarDecl(_, name, Some(kind)) => format!("${}:{}", name, kind),
- mbe::TokenTree::MetaVarDecl(_, name, None) => format!("${}:", name),
+ mbe::TokenTree::MetaVar(_, name) => format!("${name}"),
+ mbe::TokenTree::MetaVarDecl(_, name, Some(kind)) => format!("${name}:{kind}"),
+ mbe::TokenTree::MetaVarDecl(_, name, None) => format!("${name}:"),
_ => panic!(
"{}",
"unexpected mbe::TokenTree::{Sequence or Delimited} \
@@ -1418,6 +1419,11 @@ fn quoted_tt_to_string(tt: &mbe::TokenTree) -> String {
}
}
-pub(super) fn parser_from_cx(sess: &ParseSess, tts: TokenStream, recovery: Recovery) -> Parser<'_> {
- Parser::new(sess, tts, true, rustc_parse::MACRO_ARGUMENTS).recovery(recovery)
+pub(super) fn parser_from_cx(
+ sess: &ParseSess,
+ mut tts: TokenStream,
+ recovery: Recovery,
+) -> Parser<'_> {
+ tts.desugar_doc_comments();
+ Parser::new(sess, tts, rustc_parse::MACRO_ARGUMENTS).recovery(recovery)
}
diff --git a/compiler/rustc_expand/src/mbe/metavar_expr.rs b/compiler/rustc_expand/src/mbe/metavar_expr.rs
index 6e9196150..7c37aadc6 100644
--- a/compiler/rustc_expand/src/mbe/metavar_expr.rs
+++ b/compiler/rustc_expand/src/mbe/metavar_expr.rs
@@ -93,7 +93,17 @@ fn parse_count<'sess>(
span: Span,
) -> PResult<'sess, MetaVarExpr> {
let ident = parse_ident(iter, sess, span)?;
- let depth = if try_eat_comma(iter) { Some(parse_depth(iter, sess, span)?) } else { None };
+ let depth = if try_eat_comma(iter) {
+ if iter.look_ahead(0).is_none() {
+ return Err(sess.span_diagnostic.struct_span_err(
+ span,
+ "`count` followed by a comma must have an associated index indicating its depth",
+ ));
+ }
+ Some(parse_depth(iter, sess, span)?)
+ } else {
+ None
+ };
Ok(MetaVarExpr::Count(ident, depth))
}
@@ -104,13 +114,10 @@ fn parse_depth<'sess>(
span: Span,
) -> PResult<'sess, usize> {
let Some(tt) = iter.next() else { return Ok(0) };
- let TokenTree::Token(token::Token {
- kind: token::TokenKind::Literal(lit), ..
- }, _) = tt else {
- return Err(sess.span_diagnostic.struct_span_err(
- span,
- "meta-variable expression depth must be a literal"
- ));
+ let TokenTree::Token(token::Token { kind: token::TokenKind::Literal(lit), .. }, _) = tt else {
+ return Err(sess
+ .span_diagnostic
+ .struct_span_err(span, "meta-variable expression depth must be a literal"));
};
if let Ok(lit_kind) = LitKind::from_token_lit(*lit)
&& let LitKind::Int(n_u128, LitIntType::Unsuffixed) = lit_kind
diff --git a/compiler/rustc_expand/src/mbe/quoted.rs b/compiler/rustc_expand/src/mbe/quoted.rs
index 40bfa3715..6546199f5 100644
--- a/compiler/rustc_expand/src/mbe/quoted.rs
+++ b/compiler/rustc_expand/src/mbe/quoted.rs
@@ -36,7 +36,7 @@ const VALID_FRAGMENT_NAMES_MSG: &str = "valid fragment specifiers are \
///
/// A collection of `self::TokenTree`. There may also be some errors emitted to `sess`.
pub(super) fn parse(
- input: tokenstream::TokenStream,
+ input: &tokenstream::TokenStream,
parsing_patterns: bool,
sess: &ParseSess,
node_id: NodeId,
@@ -48,7 +48,7 @@ pub(super) fn parse(
// For each token tree in `input`, parse the token into a `self::TokenTree`, consuming
// additional trees if need be.
- let mut trees = input.into_trees();
+ let mut trees = input.trees();
while let Some(tree) = trees.next() {
// Given the parsed tree, if there is a metavar and we are expecting matchers, actually
// parse out the matcher (i.e., in `$id:ident` this would parse the `:` and `ident`).
@@ -56,7 +56,7 @@ pub(super) fn parse(
match tree {
TokenTree::MetaVar(start_sp, ident) if parsing_patterns => {
let span = match trees.next() {
- Some(tokenstream::TokenTree::Token(Token { kind: token::Colon, span }, _)) => {
+ Some(&tokenstream::TokenTree::Token(Token { kind: token::Colon, span }, _)) => {
match trees.next() {
Some(tokenstream::TokenTree::Token(token, _)) => match token.ident() {
Some((frag, _)) => {
@@ -96,10 +96,10 @@ pub(super) fn parse(
}
_ => token.span,
},
- tree => tree.as_ref().map_or(span, tokenstream::TokenTree::span),
+ tree => tree.map_or(span, tokenstream::TokenTree::span),
}
}
- tree => tree.as_ref().map_or(start_sp, tokenstream::TokenTree::span),
+ tree => tree.map_or(start_sp, tokenstream::TokenTree::span),
};
result.push(TokenTree::MetaVarDecl(span, ident, None));
@@ -134,9 +134,9 @@ fn maybe_emit_macro_metavar_expr_feature(features: &Features, sess: &ParseSess,
/// - `parsing_patterns`: same as [parse].
/// - `sess`: the parsing session. Any errors will be emitted to this session.
/// - `features`: language features so we can do feature gating.
-fn parse_tree(
- tree: tokenstream::TokenTree,
- outer_trees: &mut impl Iterator<Item = tokenstream::TokenTree>,
+fn parse_tree<'a>(
+ tree: &'a tokenstream::TokenTree,
+ outer_trees: &mut impl Iterator<Item = &'a tokenstream::TokenTree>,
parsing_patterns: bool,
sess: &ParseSess,
node_id: NodeId,
@@ -146,13 +146,13 @@ fn parse_tree(
// Depending on what `tree` is, we could be parsing different parts of a macro
match tree {
// `tree` is a `$` token. Look at the next token in `trees`
- tokenstream::TokenTree::Token(Token { kind: token::Dollar, span }, _) => {
+ &tokenstream::TokenTree::Token(Token { kind: token::Dollar, span }, _) => {
// FIXME: Handle `Invisible`-delimited groups in a more systematic way
// during parsing.
let mut next = outer_trees.next();
- let mut trees: Box<dyn Iterator<Item = tokenstream::TokenTree>>;
+ let mut trees: Box<dyn Iterator<Item = &tokenstream::TokenTree>>;
if let Some(tokenstream::TokenTree::Delimited(_, Delimiter::Invisible, tts)) = next {
- trees = Box::new(tts.into_trees());
+ trees = Box::new(tts.trees());
next = trees.next();
} else {
trees = Box::new(outer_trees);
@@ -160,7 +160,7 @@ fn parse_tree(
match next {
// `tree` is followed by a delimited set of token trees.
- Some(tokenstream::TokenTree::Delimited(delim_span, delim, tts)) => {
+ Some(&tokenstream::TokenTree::Delimited(delim_span, delim, ref tts)) => {
if parsing_patterns {
if delim != Delimiter::Parenthesis {
span_dollar_dollar_or_metavar_in_the_lhs_err(
@@ -194,7 +194,7 @@ fn parse_tree(
Delimiter::Parenthesis => {}
_ => {
let tok = pprust::token_kind_to_string(&token::OpenDelim(delim));
- let msg = format!("expected `(` or `{{`, found `{}`", tok);
+ let msg = format!("expected `(` or `{{`, found `{tok}`");
sess.span_diagnostic.span_err(delim_span.entire(), msg);
}
}
@@ -228,7 +228,7 @@ fn parse_tree(
}
// `tree` is followed by another `$`. This is an escaped `$`.
- Some(tokenstream::TokenTree::Token(Token { kind: token::Dollar, span }, _)) => {
+ Some(&tokenstream::TokenTree::Token(Token { kind: token::Dollar, span }, _)) => {
if parsing_patterns {
span_dollar_dollar_or_metavar_in_the_lhs_err(
sess,
@@ -256,11 +256,11 @@ fn parse_tree(
}
// `tree` is an arbitrary token. Keep it.
- tokenstream::TokenTree::Token(token, _) => TokenTree::Token(token),
+ tokenstream::TokenTree::Token(token, _) => TokenTree::Token(token.clone()),
// `tree` is the beginning of a delimited set of tokens (e.g., `(` or `{`). We need to
// descend into the delimited set and further parse it.
- tokenstream::TokenTree::Delimited(span, delim, tts) => TokenTree::Delimited(
+ &tokenstream::TokenTree::Delimited(span, delim, ref tts) => TokenTree::Delimited(
span,
Delimited {
delim,
@@ -286,16 +286,16 @@ fn kleene_op(token: &Token) -> Option<KleeneOp> {
/// - Ok(Ok((op, span))) if the next token tree is a KleeneOp
/// - Ok(Err(tok, span)) if the next token tree is a token but not a KleeneOp
/// - Err(span) if the next token tree is not a token
-fn parse_kleene_op(
- input: &mut impl Iterator<Item = tokenstream::TokenTree>,
+fn parse_kleene_op<'a>(
+ input: &mut impl Iterator<Item = &'a tokenstream::TokenTree>,
span: Span,
) -> Result<Result<(KleeneOp, Span), Token>, Span> {
match input.next() {
Some(tokenstream::TokenTree::Token(token, _)) => match kleene_op(&token) {
Some(op) => Ok(Ok((op, token.span))),
- None => Ok(Err(token)),
+ None => Ok(Err(token.clone())),
},
- tree => Err(tree.as_ref().map_or(span, tokenstream::TokenTree::span)),
+ tree => Err(tree.map_or(span, tokenstream::TokenTree::span)),
}
}
@@ -311,8 +311,8 @@ fn parse_kleene_op(
/// session `sess`. If the next one (or possibly two) tokens in `input` correspond to a Kleene
/// operator and separator, then a tuple with `(separator, KleeneOp)` is returned. Otherwise, an
/// error with the appropriate span is emitted to `sess` and a dummy value is returned.
-fn parse_sep_and_kleene_op(
- input: &mut impl Iterator<Item = tokenstream::TokenTree>,
+fn parse_sep_and_kleene_op<'a>(
+ input: &mut impl Iterator<Item = &'a tokenstream::TokenTree>,
span: Span,
sess: &ParseSess,
) -> (Option<Token>, KleeneToken) {
diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs
index d523d3eac..15e7ab3fe 100644
--- a/compiler/rustc_expand/src/mbe/transcribe.rs
+++ b/compiler/rustc_expand/src/mbe/transcribe.rs
@@ -182,9 +182,7 @@ pub(super) fn transcribe<'a>(
LockstepIterSize::Constraint(len, _) => {
// We do this to avoid an extra clone above. We know that this is a
// sequence already.
- let mbe::TokenTree::Sequence(sp, seq) = seq else {
- unreachable!()
- };
+ let mbe::TokenTree::Sequence(sp, seq) = seq else { unreachable!() };
// Is the repetition empty?
if len == 0 {
@@ -222,16 +220,15 @@ pub(super) fn transcribe<'a>(
MatchedTokenTree(tt) => {
// `tt`s are emitted into the output stream directly as "raw tokens",
// without wrapping them into groups.
- let token = tt.clone();
- result.push(token);
+ result.push(tt.clone());
}
MatchedNonterminal(nt) => {
// Other variables are emitted into the output stream as groups with
// `Delimiter::Invisible` to maintain parsing priorities.
// `Interpolated` is currently used for such groups in rustc parser.
marker.visit_span(&mut sp);
- let token = TokenTree::token_alone(token::Interpolated(nt.clone()), sp);
- result.push(token);
+ result
+ .push(TokenTree::token_alone(token::Interpolated(nt.clone()), sp));
}
MatchedSeq(..) => {
// We were unable to descend far enough. This is an error.
@@ -399,7 +396,9 @@ fn lockstep_iter_size(
}
TokenTree::MetaVarExpr(_, expr) => {
let default_rslt = LockstepIterSize::Unconstrained;
- let Some(ident) = expr.ident() else { return default_rslt; };
+ let Some(ident) = expr.ident() else {
+ return default_rslt;
+ };
let name = MacroRulesNormalizedIdent::new(ident);
match lookup_cur_matched(name, interpolations, repeats) {
Some(MatchedSeq(ads)) => {
diff --git a/compiler/rustc_expand/src/parse/tests.rs b/compiler/rustc_expand/src/parse/tests.rs
index 8b37728b6..bdc20882a 100644
--- a/compiler/rustc_expand/src/parse/tests.rs
+++ b/compiler/rustc_expand/src/parse/tests.rs
@@ -1,4 +1,6 @@
-use crate::tests::{matches_codepattern, string_to_stream, with_error_checking_parse};
+use crate::tests::{
+ matches_codepattern, string_to_stream, with_error_checking_parse, with_expected_parse_error,
+};
use rustc_ast::ptr::P;
use rustc_ast::token::{self, Delimiter, Token};
@@ -51,11 +53,15 @@ fn string_to_item(source_str: String) -> Option<P<ast::Item>> {
with_error_checking_parse(source_str, &sess(), |p| p.parse_item(ForceCollect::No))
}
-#[should_panic]
#[test]
fn bad_path_expr_1() {
+ // This should trigger error: expected identifier, found keyword `return`
create_default_session_globals_then(|| {
- string_to_expr("::abc::def::return".to_string());
+ with_expected_parse_error(
+ "::abc::def::return",
+ "expected identifier, found keyword `return`",
+ |p| p.parse_expr(),
+ );
})
}
@@ -63,9 +69,8 @@ fn bad_path_expr_1() {
#[test]
fn string_to_tts_macro() {
create_default_session_globals_then(|| {
- let tts: Vec<_> =
- string_to_stream("macro_rules! zip (($a)=>($a))".to_string()).into_trees().collect();
- let tts: &[TokenTree] = &tts[..];
+ let stream = string_to_stream("macro_rules! zip (($a)=>($a))".to_string());
+ let tts = &stream.trees().collect::<Vec<_>>()[..];
match tts {
[
@@ -294,9 +299,7 @@ fn ttdelim_span() {
.unwrap();
let ast::ExprKind::MacCall(mac) = &expr.kind else { panic!("not a macro") };
- let tts: Vec<_> = mac.args.tokens.clone().into_trees().collect();
-
- let span = tts.iter().rev().next().unwrap().span();
+ let span = mac.args.tokens.trees().last().unwrap().span();
match sess.source_map().span_to_snippet(span) {
Ok(s) => assert_eq!(&s[..], "{ body }"),
diff --git a/compiler/rustc_expand/src/placeholders.rs b/compiler/rustc_expand/src/placeholders.rs
index e9af688ee..82cac2292 100644
--- a/compiler/rustc_expand/src/placeholders.rs
+++ b/compiler/rustc_expand/src/placeholders.rs
@@ -2,6 +2,7 @@ use crate::expand::{AstFragment, AstFragmentKind};
use rustc_ast as ast;
use rustc_ast::mut_visit::*;
use rustc_ast::ptr::P;
+use rustc_ast::token::Delimiter;
use rustc_data_structures::fx::FxHashMap;
use rustc_span::source_map::DUMMY_SP;
use rustc_span::symbol::Ident;
@@ -18,7 +19,7 @@ pub fn placeholder(
path: ast::Path { span: DUMMY_SP, segments: ThinVec::new(), tokens: None },
args: P(ast::DelimArgs {
dspan: ast::tokenstream::DelimSpan::dummy(),
- delim: ast::MacDelimiter::Parenthesis,
+ delim: Delimiter::Parenthesis,
tokens: ast::tokenstream::TokenStream::new(Vec::new()),
}),
})
diff --git a/compiler/rustc_expand/src/proc_macro.rs b/compiler/rustc_expand/src/proc_macro.rs
index 41b24407f..c617cd76e 100644
--- a/compiler/rustc_expand/src/proc_macro.rs
+++ b/compiler/rustc_expand/src/proc_macro.rs
@@ -95,7 +95,7 @@ impl base::AttrProcMacro for AttrProcMacro {
|e| {
let mut err = ecx.struct_span_err(span, "custom attribute panicked");
if let Some(s) = e.as_str() {
- err.help(format!("message: {}", s));
+ err.help(format!("message: {s}"));
}
err.emit()
},
@@ -148,7 +148,7 @@ impl MultiItemModifier for DeriveProcMacro {
Err(e) => {
let mut err = ecx.struct_span_err(span, "proc-macro derive panicked");
if let Some(s) = e.as_str() {
- err.help(format!("message: {}", s));
+ err.help(format!("message: {s}"));
}
err.emit();
return ExpandResult::Ready(vec![]);
diff --git a/compiler/rustc_expand/src/proc_macro_server.rs b/compiler/rustc_expand/src/proc_macro_server.rs
index ecd231511..2dc9b51a3 100644
--- a/compiler/rustc_expand/src/proc_macro_server.rs
+++ b/compiler/rustc_expand/src/proc_macro_server.rs
@@ -94,10 +94,10 @@ impl FromInternal<(TokenStream, &mut Rustc<'_, '_>)> for Vec<TokenTree<TokenStre
// Estimate the capacity as `stream.len()` rounded up to the next power
// of two to limit the number of required reallocations.
let mut trees = Vec::with_capacity(stream.len().next_power_of_two());
- let mut cursor = stream.into_trees();
+ let mut cursor = stream.trees();
while let Some(tree) = cursor.next() {
- let (Token { kind, span }, joint) = match tree {
+ let (Token { kind, span }, joint) = match tree.clone() {
tokenstream::TokenTree::Delimited(span, delim, tts) => {
let delimiter = pm::Delimiter::from_internal(delim);
trees.push(TokenTree::Group(Group {
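
The capacity estimate in the hunk above follows the comment it sits under: rounding the length up to a power of two means at most one further doubling even if the output grows slightly past the input length. A small stand-alone illustration of the same idea (plain std `Vec`, not the proc-macro bridge itself):

fn main() {
    let input_len = 37usize;
    // Pre-size to the next power of two so later pushes rarely reallocate.
    let mut out: Vec<u32> = Vec::with_capacity(input_len.next_power_of_two());
    assert!(out.capacity() >= 64);
    out.extend(0..input_len as u32);
    assert_eq!(out.len(), 37);
}
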
@@ -622,7 +622,7 @@ impl server::SourceFile for Rustc<'_, '_> {
impl server::Span for Rustc<'_, '_> {
fn debug(&mut self, span: Self::Span) -> String {
if self.ecx.ecfg.span_debug {
- format!("{:?}", span)
+ format!("{span:?}")
} else {
format!("{:?} bytes({}..{})", span.ctxt(), span.lo().0, span.hi().0)
}
diff --git a/compiler/rustc_expand/src/tests.rs b/compiler/rustc_expand/src/tests.rs
index 8a5e09475..8e3219c13 100644
--- a/compiler/rustc_expand/src/tests.rs
+++ b/compiler/rustc_expand/src/tests.rs
@@ -8,7 +8,8 @@ use rustc_span::{BytePos, Span};
use rustc_data_structures::sync::Lrc;
use rustc_errors::emitter::EmitterWriter;
-use rustc_errors::{Handler, MultiSpan, PResult, TerminalUrl};
+use rustc_errors::{Handler, MultiSpan, PResult};
+use termcolor::WriteColor;
use std::io;
use std::io::prelude::*;
@@ -22,6 +23,23 @@ fn string_to_parser(ps: &ParseSess, source_str: String) -> Parser<'_> {
new_parser_from_source_str(ps, PathBuf::from("bogofile").into(), source_str)
}
+fn create_test_handler() -> (Handler, Lrc<SourceMap>, Arc<Mutex<Vec<u8>>>) {
+ let output = Arc::new(Mutex::new(Vec::new()));
+ let source_map = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let fallback_bundle = rustc_errors::fallback_fluent_bundle(
+ vec![crate::DEFAULT_LOCALE_RESOURCE, rustc_parse::DEFAULT_LOCALE_RESOURCE],
+ false,
+ );
+ let emitter = EmitterWriter::new(Box::new(Shared { data: output.clone() }), fallback_bundle)
+ .sm(Some(source_map.clone()))
+ .diagnostic_width(Some(140));
+ let handler = Handler::with_emitter(Box::new(emitter));
+ (handler, source_map, output)
+}
+
+/// Returns the result of parsing the given string via the given callback.
+///
+/// If there are any errors, this will panic.
pub(crate) fn with_error_checking_parse<'a, T, F>(s: String, ps: &'a ParseSess, f: F) -> T
where
F: FnOnce(&mut Parser<'a>) -> PResult<'a, T>,
@@ -32,6 +50,26 @@ where
x
}
+/// Verifies that parsing the given string using the given callback will
+/// generate an error that contains the given text.
+pub(crate) fn with_expected_parse_error<T, F>(source_str: &str, expected_output: &str, f: F)
+where
+ F: for<'a> FnOnce(&mut Parser<'a>) -> PResult<'a, T>,
+{
+ let (handler, source_map, output) = create_test_handler();
+ let ps = ParseSess::with_span_handler(handler, source_map);
+ let mut p = string_to_parser(&ps, source_str.to_string());
+ let result = f(&mut p);
+ assert!(result.is_ok());
+
+ let bytes = output.lock().unwrap();
+ let actual_output = str::from_utf8(&bytes).unwrap();
+ println!("expected output:\n------\n{}------", expected_output);
+ println!("actual output:\n------\n{}------", actual_output);
+
+ assert!(actual_output.contains(expected_output))
+}
+
/// Maps a string to tts, using a made-up filename.
pub(crate) fn string_to_stream(source_str: String) -> TokenStream {
let ps = ParseSess::new(
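
The new `with_expected_parse_error` helper works by routing diagnostics into a shared in-memory buffer and then asserting on the captured text. A self-contained sketch of that capture pattern using only the standard library; the `writeln!` call stands in for whatever emitter actually writes the diagnostic:

use std::io::Write;
use std::sync::{Arc, Mutex};

struct Shared {
    data: Arc<Mutex<Vec<u8>>>,
}

impl Write for Shared {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.data.lock().unwrap().write(buf)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        self.data.lock().unwrap().flush()
    }
}

fn main() {
    let output = Arc::new(Mutex::new(Vec::new()));
    let mut sink = Shared { data: output.clone() };
    // Stand-in for the emitter: whatever produces diagnostics gets this sink.
    writeln!(sink, "error: expected identifier, found keyword `return`").unwrap();

    let bytes = output.lock().unwrap();
    let actual = std::str::from_utf8(&bytes).unwrap();
    assert!(actual.contains("expected identifier, found keyword `return`"));
}
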
@@ -118,6 +156,20 @@ pub(crate) struct Shared<T: Write> {
pub data: Arc<Mutex<T>>,
}
+impl<T: Write> WriteColor for Shared<T> {
+ fn supports_color(&self) -> bool {
+ false
+ }
+
+ fn set_color(&mut self, _spec: &termcolor::ColorSpec) -> io::Result<()> {
+ Ok(())
+ }
+
+ fn reset(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
impl<T: Write> Write for Shared<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.data.lock().unwrap().write(buf)
@@ -130,13 +182,7 @@ impl<T: Write> Write for Shared<T> {
fn test_harness(file_text: &str, span_labels: Vec<SpanLabel>, expected_output: &str) {
create_default_session_if_not_set_then(|_| {
- let output = Arc::new(Mutex::new(Vec::new()));
-
- let fallback_bundle = rustc_errors::fallback_fluent_bundle(
- vec![crate::DEFAULT_LOCALE_RESOURCE, rustc_parse::DEFAULT_LOCALE_RESOURCE],
- false,
- );
- let source_map = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let (handler, source_map, output) = create_test_handler();
source_map.new_source_file(Path::new("test.rs").to_owned().into(), file_text.to_owned());
let primary_span = make_span(&file_text, &span_labels[0].start, &span_labels[0].end);
@@ -148,20 +194,6 @@ fn test_harness(file_text: &str, span_labels: Vec<SpanLabel>, expected_output: &
println!("text: {:?}", source_map.span_to_snippet(span));
}
- let emitter = EmitterWriter::new(
- Box::new(Shared { data: output.clone() }),
- Some(source_map.clone()),
- None,
- fallback_bundle,
- false,
- false,
- false,
- None,
- false,
- false,
- TerminalUrl::No,
- );
- let handler = Handler::with_emitter(true, None, Box::new(emitter));
#[allow(rustc::untranslatable_diagnostic)]
handler.span_err(msp, "foo");
diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs
index 0170d52e8..afcf30d0b 100644
--- a/compiler/rustc_feature/src/accepted.rs
+++ b/compiler/rustc_feature/src/accepted.rs
@@ -53,6 +53,8 @@ declare_features! (
/// Allows the sysV64 ABI to be specified on all platforms
/// instead of just the platforms on which it is the C ABI.
(accepted, abi_sysv64, "1.24.0", Some(36167), None),
+ /// Allows using the `thiscall` ABI.
+ (accepted, abi_thiscall, "1.73.0", None, None),
/// Allows using ADX intrinsics from `core::arch::{x86, x86_64}`.
(accepted, adx_target_feature, "1.61.0", Some(44839), None),
/// Allows explicit discriminants on non-unit enum variants.
diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs
index 56a2c5eff..f5bc140c0 100644
--- a/compiler/rustc_feature/src/active.rs
+++ b/compiler/rustc_feature/src/active.rs
@@ -16,12 +16,22 @@ macro_rules! set {
}};
}
+#[derive(PartialEq)]
+enum FeatureStatus {
+ Default,
+ Incomplete,
+ Internal,
+}
+
macro_rules! declare_features {
- (__status_to_bool active) => {
- false
+ (__status_to_enum active) => {
+ FeatureStatus::Default
};
- (__status_to_bool incomplete) => {
- true
+ (__status_to_enum incomplete) => {
+ FeatureStatus::Incomplete
+ };
+ (__status_to_enum internal) => {
+ FeatureStatus::Internal
};
($(
$(#[doc = $doc:tt])* ($status:ident, $feature:ident, $ver:expr, $issue:expr, $edition:expr),
@@ -83,7 +93,7 @@ macro_rules! declare_features {
pub fn incomplete(&self, feature: Symbol) -> bool {
match feature {
$(
- sym::$feature => declare_features!(__status_to_bool $status),
+ sym::$feature => declare_features!(__status_to_enum $status) == FeatureStatus::Incomplete,
)*
// accepted and removed features aren't in this file but are never incomplete
_ if self.declared_lang_features.iter().any(|f| f.0 == feature) => false,
@@ -91,6 +101,22 @@ macro_rules! declare_features {
_ => panic!("`{}` was not listed in `declare_features`", feature),
}
}
+
+ /// Some features are internal to the compiler and standard library and should not
+    /// be used in normal projects. We warn the user when one of these
+    /// features is enabled.
+ pub fn internal(&self, feature: Symbol) -> bool {
+ match feature {
+ $(
+ sym::$feature => declare_features!(__status_to_enum $status) == FeatureStatus::Internal,
+ )*
+ // accepted and removed features aren't in this file but are never internal
+ // (a removed feature might have been internal, but it doesn't matter anymore)
+ _ if self.declared_lang_features.iter().any(|f| f.0 == feature) => false,
+ _ if self.declared_lib_features.iter().any(|f| f.0 == feature) => false,
+ _ => panic!("`{}` was not listed in `declare_features`", feature),
+ }
+ }
}
};
}
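
The `__status_to_bool` → `__status_to_enum` change lets one declaration list answer several questions (`incomplete?`, `internal?`) instead of carrying a single boolean. A toy, self-contained version of that token-to-enum mapping (not the real `declare_features!` macro):

#[derive(PartialEq)]
enum FeatureStatus {
    Default,
    Incomplete,
    Internal,
}

// Map a status token to an enum variant at expansion time.
macro_rules! status_to_enum {
    (active) => { FeatureStatus::Default };
    (incomplete) => { FeatureStatus::Incomplete };
    (internal) => { FeatureStatus::Internal };
}

fn main() {
    assert!(status_to_enum!(active) == FeatureStatus::Default);
    assert!(status_to_enum!(incomplete) == FeatureStatus::Incomplete);
    assert!(status_to_enum!(internal) != FeatureStatus::Incomplete);
}
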
@@ -130,36 +156,34 @@ declare_features! (
// -------------------------------------------------------------------------
// no-tracking-issue-start
- /// Allows using the `thiscall` ABI.
- (active, abi_thiscall, "1.19.0", None, None),
/// Allows using the `unadjusted` ABI; perma-unstable.
(active, abi_unadjusted, "1.16.0", None, None),
/// Allows using the `vectorcall` ABI.
(active, abi_vectorcall, "1.7.0", None, None),
/// Allows using `#![needs_allocator]`, an implementation detail of `#[global_allocator]`.
- (active, allocator_internals, "1.20.0", None, None),
+ (internal, allocator_internals, "1.20.0", None, None),
/// Allows using `#[allow_internal_unsafe]`. This is an
/// attribute on `macro_rules!` and can't use the attribute handling
/// below (it has to be checked before expansion possibly makes
/// macros disappear).
- (active, allow_internal_unsafe, "1.0.0", None, None),
+ (internal, allow_internal_unsafe, "1.0.0", None, None),
/// Allows using `#[allow_internal_unstable]`. This is an
/// attribute on `macro_rules!` and can't use the attribute handling
/// below (it has to be checked before expansion possibly makes
/// macros disappear).
- (active, allow_internal_unstable, "1.0.0", None, None),
+ (internal, allow_internal_unstable, "1.0.0", None, None),
/// Allows using anonymous lifetimes in argument-position impl-trait.
(active, anonymous_lifetime_in_impl_trait, "1.63.0", None, None),
/// Allows identifying the `compiler_builtins` crate.
- (active, compiler_builtins, "1.13.0", None, None),
+ (internal, compiler_builtins, "1.13.0", None, None),
/// Allows writing custom MIR
- (active, custom_mir, "1.65.0", None, None),
+ (internal, custom_mir, "1.65.0", None, None),
/// Outputs useful `assert!` messages
(active, generic_assert, "1.63.0", None, None),
/// Allows using the `rust-intrinsic`'s "ABI".
- (active, intrinsics, "1.0.0", None, None),
+ (internal, intrinsics, "1.0.0", None, None),
/// Allows using `#[lang = ".."]` attribute for linking items to special compiler logic.
- (active, lang_items, "1.0.0", None, None),
+ (internal, lang_items, "1.0.0", None, None),
/// Allows `#[link(..., cfg(..))]`; perma-unstable per #37406
(active, link_cfg, "1.14.0", None, None),
/// Allows the `multiple_supertrait_upcastable` lint.
@@ -167,22 +191,22 @@ declare_features! (
/// Allow negative trait bounds. This is an internal-only feature for testing the trait solver!
(incomplete, negative_bounds, "1.71.0", None, None),
/// Allows using `#[omit_gdb_pretty_printer_section]`.
- (active, omit_gdb_pretty_printer_section, "1.5.0", None, None),
+ (internal, omit_gdb_pretty_printer_section, "1.5.0", None, None),
/// Allows using `#[prelude_import]` on glob `use` items.
- (active, prelude_import, "1.2.0", None, None),
+ (internal, prelude_import, "1.2.0", None, None),
/// Used to identify crates that contain the profiler runtime.
- (active, profiler_runtime, "1.18.0", None, None),
+ (internal, profiler_runtime, "1.18.0", None, None),
/// Allows using `rustc_*` attributes (RFC 572).
- (active, rustc_attrs, "1.0.0", None, None),
+ (internal, rustc_attrs, "1.0.0", None, None),
/// Allows using the `#[stable]` and `#[unstable]` attributes.
- (active, staged_api, "1.0.0", None, None),
+ (internal, staged_api, "1.0.0", None, None),
/// Added for testing E0705; perma-unstable.
- (active, test_2018_feature, "1.31.0", None, Some(Edition::Edition2018)),
+ (internal, test_2018_feature, "1.31.0", None, Some(Edition::Edition2018)),
/// Added for testing unstable lints; perma-unstable.
- (active, test_unstable_lint, "1.60.0", None, None),
+ (internal, test_unstable_lint, "1.60.0", None, None),
/// Allows non-`unsafe` —and thus, unsound— access to `Pin` constructions.
- /// Marked `incomplete` since perma-unstable and unsound.
- (incomplete, unsafe_pin_internals, "1.60.0", None, None),
+ /// Marked `internal` since perma-unstable and unsound.
+ (internal, unsafe_pin_internals, "1.60.0", None, None),
/// Use for stable + negative coherence and strict coherence depending on trait's
/// rustc_strict_coherence value.
(active, with_negative_coherence, "1.60.0", None, None),
@@ -216,19 +240,19 @@ declare_features! (
/// Allows using the `#[linkage = ".."]` attribute.
(active, linkage, "1.0.0", Some(29603), None),
/// Allows declaring with `#![needs_panic_runtime]` that a panic runtime is needed.
- (active, needs_panic_runtime, "1.10.0", Some(32837), None),
+ (internal, needs_panic_runtime, "1.10.0", Some(32837), None),
/// Allows using `+bundled,+whole-archive` native libs.
(active, packed_bundled_libs, "1.69.0", Some(108081), None),
/// Allows using the `#![panic_runtime]` attribute.
- (active, panic_runtime, "1.10.0", Some(32837), None),
+ (internal, panic_runtime, "1.10.0", Some(32837), None),
/// Allows using `#[rustc_allow_const_fn_unstable]`.
/// This is an attribute on `const fn` for the same
/// purpose as `#[allow_internal_unstable]`.
- (active, rustc_allow_const_fn_unstable, "1.49.0", Some(69399), None),
+ (internal, rustc_allow_const_fn_unstable, "1.49.0", Some(69399), None),
/// Allows using compiler's own crates.
(active, rustc_private, "1.0.0", Some(27812), None),
/// Allows using internal rustdoc features like `doc(keyword)`.
- (active, rustdoc_internals, "1.58.0", Some(90418), None),
+ (internal, rustdoc_internals, "1.58.0", Some(90418), None),
/// Allows using the `rustdoc::missing_doc_code_examples` lint
(active, rustdoc_missing_doc_code_examples, "1.31.0", Some(101730), None),
/// Allows using `#[start]` on a function indicating that it is the program entrypoint.
@@ -258,6 +282,7 @@ declare_features! (
(active, arm_target_feature, "1.27.0", Some(44839), None),
(active, avx512_target_feature, "1.27.0", Some(44839), None),
(active, bpf_target_feature, "1.54.0", Some(44839), None),
+ (active, csky_target_feature, "1.73.0", Some(44839), None),
(active, ermsb_target_feature, "1.49.0", Some(44839), None),
(active, hexagon_target_feature, "1.27.0", Some(44839), None),
(active, mips_target_feature, "1.27.0", Some(44839), None),
@@ -289,6 +314,8 @@ declare_features! (
(active, abi_msp430_interrupt, "1.16.0", Some(38487), None),
/// Allows `extern "ptx-*" fn()`.
(active, abi_ptx, "1.15.0", Some(38788), None),
+ /// Allows `extern "riscv-interrupt-m" fn()` and `extern "riscv-interrupt-s" fn()`.
+ (active, abi_riscv_interrupt, "1.73.0", Some(111889), None),
/// Allows `extern "x86-interrupt" fn()`.
(active, abi_x86_interrupt, "1.17.0", Some(40180), None),
/// Allows additional const parameter types, such as `&'static str` or user defined types
@@ -313,6 +340,8 @@ declare_features! (
(active, async_closure, "1.37.0", Some(62290), None),
/// Allows async functions to be declared, implemented, and used in traits.
(active, async_fn_in_trait, "1.66.0", Some(91611), None),
+ /// Allows `#[track_caller]` on async functions.
+ (active, async_fn_track_caller, "1.73.0", Some(110011), None),
/// Allows builtin # foo() syntax
(active, builtin_syntax, "1.71.0", Some(110680), None),
/// Allows `c"foo"` literals.
@@ -323,6 +352,8 @@ declare_features! (
(active, c_variadic, "1.34.0", Some(44930), None),
/// Allows the use of `#[cfg(overflow_checks)]` to check whether integer overflow checks are enabled.
(active, cfg_overflow_checks, "1.71.0", Some(111466), None),
+    /// Provides the relocation model information as a cfg entry
+ (active, cfg_relocation_model, "1.73.0", Some(114929), None),
/// Allows the use of `#[cfg(sanitize = "option")]`; set when -Zsanitizer is used.
(active, cfg_sanitize, "1.41.0", Some(39699), None),
/// Allows `cfg(target_abi = "...")`.
@@ -379,6 +410,8 @@ declare_features! (
(active, deprecated_safe, "1.61.0", Some(94978), None),
/// Allows using `suggestion` in the `#[deprecated]` attribute.
(active, deprecated_suggestion, "1.61.0", Some(94785), None),
+ /// Allows using the `#[diagnostic]` attribute tool namespace
+ (active, diagnostic_namespace, "1.73.0", Some(94785), None),
/// Controls errors in trait implementations.
(active, do_not_recommend, "1.67.0", Some(51992), None),
/// Tells rustdoc to automatically generate `#[doc(cfg(...))]`.
@@ -422,6 +455,8 @@ declare_features! (
(incomplete, generic_associated_types_extended, "1.61.0", Some(95451), None),
/// Allows non-trivial generic constants which have to have wfness manually propagated to callers
(incomplete, generic_const_exprs, "1.56.0", Some(76560), None),
+ /// Allows generic parameters and where-clauses on free & associated const items.
+ (incomplete, generic_const_items, "1.73.0", Some(113521), None),
/// Allows using `..=X` as a patterns in slices.
(active, half_open_range_patterns_in_slices, "1.66.0", Some(67264), None),
/// Allows `if let` guard in match arms.
@@ -445,7 +480,7 @@ declare_features! (
// Allows setting the threshold for the `large_assignments` lint.
(active, large_assignments, "1.52.0", Some(83518), None),
/// Allows having type alias types for inter-crate use.
- (active, lazy_type_alias, "1.72.0", Some(112792), None),
+ (incomplete, lazy_type_alias, "1.72.0", Some(112792), None),
/// Allows `if/while p && let q = r && ...` chains.
(active, let_chains, "1.37.0", Some(53667), None),
/// Allows using `reason` in lint attributes and the `#[expect(lint)]` lint check.
diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs
index 366000044..2f7cff3ce 100644
--- a/compiler/rustc_feature/src/builtin_attrs.rs
+++ b/compiler/rustc_feature/src/builtin_attrs.rs
@@ -35,6 +35,7 @@ const GATED_CFGS: &[GatedCfg] = &[
(sym::target_has_atomic_load_store, sym::cfg_target_has_atomic, cfg_fn!(cfg_target_has_atomic)),
(sym::sanitize, sym::cfg_sanitize, cfg_fn!(cfg_sanitize)),
(sym::version, sym::cfg_version, cfg_fn!(cfg_version)),
+ (sym::relocation_model, sym::cfg_relocation_model, cfg_fn!(cfg_relocation_model)),
];
/// Find a gated cfg determined by the `pred`icate which is given the cfg's name.
@@ -625,6 +626,12 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
ErrorFollowing,
INTERNAL_UNSTABLE
),
+ rustc_attr!(
+ rustc_confusables, Normal,
+ template!(List: r#""name1", "name2", ..."#),
+ ErrorFollowing,
+ INTERNAL_UNSTABLE,
+ ),
// Enumerates "identity-like" conversion methods to suggest on type mismatch.
rustc_attr!(
rustc_conversion_suggestion, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE
@@ -812,7 +819,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
TEST, rustc_error, Normal,
template!(Word, List: "delay_span_bug_from_inside_query"), WarnFollowingWordOnly
),
- rustc_attr!(TEST, rustc_dump_user_substs, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_dump_user_args, Normal, template!(Word), WarnFollowing),
rustc_attr!(TEST, rustc_evaluate_where_clauses, Normal, template!(Word), WarnFollowing),
rustc_attr!(
TEST, rustc_if_this_changed, Normal, template!(Word, List: "DepNode"), DuplicatesOk
diff --git a/compiler/rustc_feature/src/lib.rs b/compiler/rustc_feature/src/lib.rs
index beb630784..69e331159 100644
--- a/compiler/rustc_feature/src/lib.rs
+++ b/compiler/rustc_feature/src/lib.rs
@@ -108,8 +108,6 @@ impl UnstableFeatures {
fn find_lang_feature_issue(feature: Symbol) -> Option<NonZeroU32> {
if let Some(info) = ACTIVE_FEATURES.iter().find(|t| t.name == feature) {
- // FIXME (#28244): enforce that active features have issue numbers
- // assert!(info.issue.is_some())
info.issue
} else {
// search in Accepted, Removed, or Stable Removed features
diff --git a/compiler/rustc_hir/src/def.rs b/compiler/rustc_hir/src/def.rs
index 30bf8c2ad..642713096 100644
--- a/compiler/rustc_hir/src/def.rs
+++ b/compiler/rustc_hir/src/def.rs
@@ -61,7 +61,9 @@ pub enum DefKind {
Variant,
Trait,
/// Type alias: `type Foo = Bar;`
- TyAlias,
+ TyAlias {
+ lazy: bool,
+ },
/// Type from an `extern` block.
ForeignTy,
/// Trait alias: `trait IntIterator = Iterator<Item = i32>;`
@@ -109,8 +111,6 @@ pub enum DefKind {
InlineConst,
/// Opaque type, aka `impl Trait`.
OpaqueTy,
- /// A return-position `impl Trait` in a trait definition
- ImplTraitPlaceholder,
Field,
/// Lifetime parameter: the `'a` in `struct Foo<'a> { ... }`
LifetimeParam,
@@ -143,8 +143,7 @@ impl DefKind {
DefKind::Ctor(CtorOf::Struct, CtorKind::Fn) => "tuple struct",
DefKind::Ctor(CtorOf::Struct, CtorKind::Const) => "unit struct",
DefKind::OpaqueTy => "opaque type",
- DefKind::ImplTraitPlaceholder => "opaque type in trait",
- DefKind::TyAlias => "type alias",
+ DefKind::TyAlias { .. } => "type alias",
DefKind::TraitAlias => "trait alias",
DefKind::AssocTy => "associated type",
DefKind::Union => "union",
@@ -200,7 +199,7 @@ impl DefKind {
| DefKind::Variant
| DefKind::Trait
| DefKind::OpaqueTy
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -227,8 +226,7 @@ impl DefKind {
| DefKind::Use
| DefKind::ForeignMod
| DefKind::GlobalAsm
- | DefKind::Impl { .. }
- | DefKind::ImplTraitPlaceholder => None,
+ | DefKind::Impl { .. } => None,
}
}
@@ -252,7 +250,7 @@ impl DefKind {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -262,7 +260,6 @@ impl DefKind {
| DefKind::Use
| DefKind::ForeignMod
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::Impl { .. }
| DefKind::Field
| DefKind::TyParam
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index 6c419471d..0bfd62d68 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -1754,7 +1754,7 @@ impl Expr<'_> {
ExprKind::Unary(UnOp::Deref, _) => true,
- ExprKind::Field(ref base, _) | ExprKind::Index(ref base, _) => {
+ ExprKind::Field(ref base, _) | ExprKind::Index(ref base, _, _) => {
allow_projections_from(base) || base.is_place_expr(allow_projections_from)
}
@@ -1831,7 +1831,7 @@ impl Expr<'_> {
ExprKind::Type(base, _)
| ExprKind::Unary(_, base)
| ExprKind::Field(base, _)
- | ExprKind::Index(base, _)
+ | ExprKind::Index(base, _, _)
| ExprKind::AddrOf(.., base)
| ExprKind::Cast(base, _) => {
// This isn't exactly true for `Index` and all `Unary`, but we are using this
@@ -1843,7 +1843,7 @@ impl Expr<'_> {
.iter()
.map(|field| field.expr)
.chain(init.into_iter())
- .all(|e| e.can_have_side_effects()),
+ .any(|e| e.can_have_side_effects()),
ExprKind::Array(args)
| ExprKind::Tup(args)
@@ -1857,7 +1857,7 @@ impl Expr<'_> {
..
},
args,
- ) => args.iter().all(|arg| arg.can_have_side_effects()),
+ ) => args.iter().any(|arg| arg.can_have_side_effects()),
ExprKind::If(..)
| ExprKind::Match(..)
| ExprKind::MethodCall(..)
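
The two hunks above fix an inverted check: an expression with several operands can have side effects if any operand can, so `all` (which required every operand to be side-effecting) was the wrong combinator. The difference in one line of plain Rust, assuming a per-operand boolean:

fn main() {
    // Per-operand "can this have side effects?" results for a struct literal.
    let operands = [false, true, false];
    assert!(operands.iter().any(|&p| p));  // the aggregate can have side effects
    assert!(!operands.iter().all(|&p| p)); // `all` would wrongly report that it cannot
}
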
@@ -2015,7 +2015,9 @@ pub enum ExprKind<'hir> {
/// Access of a named (e.g., `obj.foo`) or unnamed (e.g., `obj.0`) struct or tuple field.
Field(&'hir Expr<'hir>, Ident),
/// An indexing operation (`foo[2]`).
- Index(&'hir Expr<'hir>, &'hir Expr<'hir>),
+ /// Similar to [`ExprKind::MethodCall`], the final `Span` represents the span of the brackets
+ /// and index.
+ Index(&'hir Expr<'hir>, &'hir Expr<'hir>, Span),
/// Path to a definition, possibly containing lifetime or type parameters.
Path(QPath<'hir>),
@@ -2146,7 +2148,7 @@ pub enum MatchSource {
/// A desugared `for _ in _ { .. }` loop.
ForLoopDesugar,
/// A desugared `?` operator.
- TryDesugar,
+ TryDesugar(HirId),
/// A desugared `<expr>.await`.
AwaitDesugar,
/// A desugared `format_args!()`.
@@ -2160,7 +2162,7 @@ impl MatchSource {
match self {
Normal => "match",
ForLoopDesugar => "for",
- TryDesugar => "?",
+ TryDesugar(_) => "?",
AwaitDesugar => ".await",
FormatArgs => "format_args!()",
}
@@ -2664,10 +2666,19 @@ pub struct OpaqueTy<'hir> {
pub generics: &'hir Generics<'hir>,
pub bounds: GenericBounds<'hir>,
pub origin: OpaqueTyOrigin,
- // Opaques have duplicated lifetimes, this mapping connects the original lifetime with the copy
- // so we can later generate bidirectional outlives predicates to enforce that these lifetimes
- // stay in sync.
- pub lifetime_mapping: &'hir [(Lifetime, LocalDefId)],
+ /// Return-position impl traits (and async futures) must "reify" any late-bound
+ /// lifetimes that are captured from the function signature they originate from.
+ ///
+ /// This is done by generating a new early-bound lifetime parameter local to the
+ /// opaque which is substituted in the function signature with the late-bound
+ /// lifetime.
+ ///
+    /// This mapping associates a captured lifetime (first parameter) with the new
+ /// early-bound lifetime that was generated for the opaque.
+ pub lifetime_mapping: &'hir [(&'hir Lifetime, LocalDefId)],
+ /// Whether the opaque is a return-position impl trait (or async future)
+ /// originating from a trait method. This makes it so that the opaque is
+ /// lowered as an associated type.
pub in_trait: bool,
}
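
The new doc comment on `lifetime_mapping` describes how a late-bound lifetime captured by a return-position `impl Trait` is re-introduced as an early-bound parameter on the opaque. A surface-level illustration of the situation it refers to (ordinary Rust, no compiler internals):

// `'a` is late-bound on `first`, but the opaque return type captures it, so
// internally the opaque gets its own early-bound lifetime that `'a` maps to.
fn first<'a>(xs: &'a [u32]) -> impl Iterator<Item = u32> + 'a {
    xs.iter().copied()
}

fn main() {
    let data = vec![1, 2, 3];
    let mut it = first(&data);
    assert_eq!(it.next(), Some(1));
}
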
@@ -3004,8 +3015,7 @@ pub struct FieldDef<'hir> {
impl FieldDef<'_> {
// Still necessary in a couple of places
pub fn is_positional(&self) -> bool {
- let first = self.ident.as_str().as_bytes()[0];
- (b'0'..=b'9').contains(&first)
+ self.ident.as_str().as_bytes()[0].is_ascii_digit()
}
}
@@ -3122,9 +3132,9 @@ impl<'hir> Item<'hir> {
}
/// Expect an [`ItemKind::Const`] or panic.
#[track_caller]
- pub fn expect_const(&self) -> (&'hir Ty<'hir>, BodyId) {
- let ItemKind::Const(ty, body) = self.kind else { self.expect_failed("a constant") };
- (ty, body)
+ pub fn expect_const(&self) -> (&'hir Ty<'hir>, &'hir Generics<'hir>, BodyId) {
+ let ItemKind::Const(ty, gen, body) = self.kind else { self.expect_failed("a constant") };
+ (ty, gen, body)
}
/// Expect an [`ItemKind::Fn`] or panic.
#[track_caller]
@@ -3150,7 +3160,9 @@ impl<'hir> Item<'hir> {
/// Expect an [`ItemKind::ForeignMod`] or panic.
#[track_caller]
pub fn expect_foreign_mod(&self) -> (Abi, &'hir [ForeignItemRef]) {
- let ItemKind::ForeignMod { abi, items } = self.kind else { self.expect_failed("a foreign module") };
+ let ItemKind::ForeignMod { abi, items } = self.kind else {
+ self.expect_failed("a foreign module")
+ };
(abi, items)
}
@@ -3201,14 +3213,18 @@ impl<'hir> Item<'hir> {
pub fn expect_trait(
self,
) -> (IsAuto, Unsafety, &'hir Generics<'hir>, GenericBounds<'hir>, &'hir [TraitItemRef]) {
- let ItemKind::Trait(is_auto, unsafety, gen, bounds, items) = self.kind else { self.expect_failed("a trait") };
+ let ItemKind::Trait(is_auto, unsafety, gen, bounds, items) = self.kind else {
+ self.expect_failed("a trait")
+ };
(is_auto, unsafety, gen, bounds, items)
}
/// Expect an [`ItemKind::TraitAlias`] or panic.
#[track_caller]
pub fn expect_trait_alias(&self) -> (&'hir Generics<'hir>, GenericBounds<'hir>) {
- let ItemKind::TraitAlias(gen, bounds) = self.kind else { self.expect_failed("a trait alias") };
+ let ItemKind::TraitAlias(gen, bounds) = self.kind else {
+ self.expect_failed("a trait alias")
+ };
(gen, bounds)
}
@@ -3305,7 +3321,7 @@ pub enum ItemKind<'hir> {
/// A `static` item.
Static(&'hir Ty<'hir>, Mutability, BodyId),
/// A `const` item.
- Const(&'hir Ty<'hir>, BodyId),
+ Const(&'hir Ty<'hir>, &'hir Generics<'hir>, BodyId),
/// A function declaration.
Fn(FnSig<'hir>, &'hir Generics<'hir>, BodyId),
/// A MBE macro definition (`macro_rules!` or `macro`).
@@ -3343,7 +3359,6 @@ pub struct Impl<'hir> {
// We do not put a `Span` in `Defaultness` because it breaks foreign crate metadata
// decoding as `Span`s cannot be decoded when a `Session` is not available.
pub defaultness_span: Option<Span>,
- pub constness: Constness,
pub generics: &'hir Generics<'hir>,
/// The trait being implemented, if any.
@@ -3358,6 +3373,7 @@ impl ItemKind<'_> {
Some(match *self {
ItemKind::Fn(_, ref generics, _)
| ItemKind::TyAlias(_, ref generics)
+ | ItemKind::Const(_, ref generics, _)
| ItemKind::OpaqueTy(OpaqueTy { ref generics, .. })
| ItemKind::Enum(_, ref generics)
| ItemKind::Struct(_, ref generics)
@@ -3553,7 +3569,9 @@ impl<'hir> OwnerNode<'hir> {
match self {
OwnerNode::Item(Item {
kind:
- ItemKind::Static(_, _, body) | ItemKind::Const(_, body) | ItemKind::Fn(_, _, body),
+ ItemKind::Static(_, _, body)
+ | ItemKind::Const(_, _, body)
+ | ItemKind::Fn(_, _, body),
..
})
| OwnerNode::TraitItem(TraitItem {
@@ -3756,9 +3774,9 @@ impl<'hir> Node<'hir> {
pub fn ty(self) -> Option<&'hir Ty<'hir>> {
match self {
Node::Item(it) => match it.kind {
- ItemKind::TyAlias(ty, _) | ItemKind::Static(ty, _, _) | ItemKind::Const(ty, _) => {
- Some(ty)
- }
+ ItemKind::TyAlias(ty, _)
+ | ItemKind::Static(ty, _, _)
+ | ItemKind::Const(ty, _, _) => Some(ty),
_ => None,
},
Node::TraitItem(it) => match it.kind {
@@ -3786,7 +3804,9 @@ impl<'hir> Node<'hir> {
match self {
Node::Item(Item {
kind:
- ItemKind::Static(_, _, body) | ItemKind::Const(_, body) | ItemKind::Fn(_, _, body),
+ ItemKind::Static(_, _, body)
+ | ItemKind::Const(_, _, body)
+ | ItemKind::Fn(_, _, body),
..
})
| Node::TraitItem(TraitItem {
diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs
index 347c1f463..172f557f8 100644
--- a/compiler/rustc_hir/src/intravisit.rs
+++ b/compiler/rustc_hir/src/intravisit.rs
@@ -467,11 +467,17 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) {
ItemKind::Use(ref path, _) => {
visitor.visit_use(path, item.hir_id());
}
- ItemKind::Static(ref typ, _, body) | ItemKind::Const(ref typ, body) => {
+ ItemKind::Static(ref typ, _, body) => {
visitor.visit_id(item.hir_id());
visitor.visit_ty(typ);
visitor.visit_nested_body(body);
}
+ ItemKind::Const(ref typ, ref generics, body) => {
+ visitor.visit_id(item.hir_id());
+ visitor.visit_ty(typ);
+ visitor.visit_generics(generics);
+ visitor.visit_nested_body(body);
+ }
ItemKind::Fn(ref sig, ref generics, body_id) => {
visitor.visit_id(item.hir_id());
visitor.visit_fn(
@@ -516,7 +522,6 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) {
unsafety: _,
defaultness: _,
polarity: _,
- constness: _,
defaultness_span: _,
ref generics,
ref of_trait,
@@ -774,7 +779,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>)
visitor.visit_expr(subexpression);
visitor.visit_ident(ident);
}
- ExprKind::Index(ref main_expression, ref index_expression) => {
+ ExprKind::Index(ref main_expression, ref index_expression, _) => {
visitor.visit_expr(main_expression);
visitor.visit_expr(index_expression)
}
diff --git a/compiler/rustc_hir/src/lib.rs b/compiler/rustc_hir/src/lib.rs
index 616de57dc..34214931a 100644
--- a/compiler/rustc_hir/src/lib.rs
+++ b/compiler/rustc_hir/src/lib.rs
@@ -13,6 +13,7 @@
#![recursion_limit = "256"]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate rustc_macros;
diff --git a/compiler/rustc_hir/src/target.rs b/compiler/rustc_hir/src/target.rs
index 961deac54..644c4d826 100644
--- a/compiler/rustc_hir/src/target.rs
+++ b/compiler/rustc_hir/src/target.rs
@@ -36,7 +36,6 @@ pub enum Target {
GlobalAsm,
TyAlias,
OpaqueTy,
- ImplTraitPlaceholder,
Enum,
Variant,
Struct,
@@ -80,13 +79,7 @@ impl Target {
ItemKind::ForeignMod { .. } => Target::ForeignMod,
ItemKind::GlobalAsm(..) => Target::GlobalAsm,
ItemKind::TyAlias(..) => Target::TyAlias,
- ItemKind::OpaqueTy(ref opaque) => {
- if opaque.in_trait {
- Target::ImplTraitPlaceholder
- } else {
- Target::OpaqueTy
- }
- }
+ ItemKind::OpaqueTy(..) => Target::OpaqueTy,
ItemKind::Enum(..) => Target::Enum,
ItemKind::Struct(..) => Target::Struct,
ItemKind::Union(..) => Target::Union,
@@ -108,9 +101,8 @@ impl Target {
DefKind::Mod => Target::Mod,
DefKind::ForeignMod => Target::ForeignMod,
DefKind::GlobalAsm => Target::GlobalAsm,
- DefKind::TyAlias => Target::TyAlias,
+ DefKind::TyAlias { .. } => Target::TyAlias,
DefKind::OpaqueTy => Target::OpaqueTy,
- DefKind::ImplTraitPlaceholder => Target::ImplTraitPlaceholder,
DefKind::Enum => Target::Enum,
DefKind::Struct => Target::Struct,
DefKind::Union => Target::Union,
@@ -165,7 +157,6 @@ impl Target {
Target::GlobalAsm => "global asm",
Target::TyAlias => "type alias",
Target::OpaqueTy => "opaque type",
- Target::ImplTraitPlaceholder => "opaque type in trait",
Target::Enum => "enum",
Target::Variant => "enum variant",
Target::Struct => "struct",
diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl
index 166760166..597cae6ff 100644
--- a/compiler/rustc_hir_analysis/messages.ftl
+++ b/compiler/rustc_hir_analysis/messages.ftl
@@ -1,6 +1,9 @@
hir_analysis_ambiguous_lifetime_bound =
ambiguous lifetime bound, explicit lifetime bound required
+hir_analysis_assoc_bound_on_const = expected associated type, found {$descr}
+ .note = trait bounds not allowed on {$descr}
+
hir_analysis_assoc_type_binding_not_allowed =
associated type bindings are not allowed here
.label = associated type not allowed here
diff --git a/compiler/rustc_hir_analysis/src/astconv/bounds.rs b/compiler/rustc_hir_analysis/src/astconv/bounds.rs
index b13de7701..ba152cd48 100644
--- a/compiler/rustc_hir_analysis/src/astconv/bounds.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/bounds.rs
@@ -13,7 +13,7 @@ use crate::astconv::{
AstConv, ConvertedBinding, ConvertedBindingKind, OnlySelfBounds, PredicateFilter,
};
use crate::bounds::Bounds;
-use crate::errors::{MultipleRelaxedDefaultBounds, ValueOfAssociatedStructAlreadySpecified};
+use crate::errors;
impl<'tcx> dyn AstConv<'tcx> + '_ {
/// Sets `implicitly_sized` to true on `Bounds` if necessary
@@ -35,7 +35,7 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
if unbound.is_none() {
unbound = Some(&ptr.trait_ref);
} else {
- tcx.sess.emit_err(MultipleRelaxedDefaultBounds { span });
+ tcx.sess.emit_err(errors::MultipleRelaxedDefaultBounds { span });
}
}
}
@@ -326,7 +326,7 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
dup_bindings
.entry(assoc_item.def_id)
.and_modify(|prev_span| {
- tcx.sess.emit_err(ValueOfAssociatedStructAlreadySpecified {
+ tcx.sess.emit_err(errors::ValueOfAssociatedStructAlreadySpecified {
span: binding.span,
prev_span: *prev_span,
item_name: binding.item_name,
@@ -341,8 +341,8 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
// If we have a method return-type bound, then we need to substitute
// the method's early bound params with suitable late-bound params.
let mut num_bound_vars = candidate.bound_vars().len();
- let substs =
- candidate.skip_binder().substs.extend_to(tcx, assoc_item.def_id, |param, _| {
+ let args =
+ candidate.skip_binder().args.extend_to(tcx, assoc_item.def_id, |param, _| {
let subst = match param.kind {
ty::GenericParamDefKind::Lifetime => ty::Region::new_late_bound(
tcx,
@@ -422,7 +422,7 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
// params (and trait ref's late bound params). This logic is very similar to
// `Predicate::subst_supertrait`, and that is no coincidence.
let shifted_output = tcx.shift_bound_var_indices(num_bound_vars, output);
- let subst_output = ty::EarlyBinder::bind(shifted_output).subst(tcx, substs);
+ let subst_output = ty::EarlyBinder::bind(shifted_output).instantiate(tcx, args);
let bound_vars = tcx.late_bound_vars(binding.hir_id);
ty::Binder::bind_with_vars(subst_output, bound_vars)
@@ -438,16 +438,16 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
infer_args: false,
};
- let substs_trait_ref_and_assoc_item = self.create_substs_for_associated_item(
+ let args_trait_ref_and_assoc_item = self.create_args_for_associated_item(
path_span,
assoc_item.def_id,
&item_segment,
- trait_ref.substs,
+ trait_ref.args,
);
- debug!(?substs_trait_ref_and_assoc_item);
+ debug!(?args_trait_ref_and_assoc_item);
- tcx.mk_alias_ty(assoc_item.def_id, substs_trait_ref_and_assoc_item)
+ tcx.mk_alias_ty(assoc_item.def_id, args_trait_ref_and_assoc_item)
})
};
@@ -488,6 +488,8 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
}
}
+ let assoc_item_def_id = projection_ty.skip_binder().def_id;
+ let def_kind = tcx.def_kind(assoc_item_def_id);
match binding.kind {
ConvertedBindingKind::Equality(..) if return_type_notation => {
return Err(self.tcx().sess.emit_err(
@@ -499,11 +501,9 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
// the "projection predicate" for:
//
// `<T as Iterator>::Item = u32`
- let assoc_item_def_id = projection_ty.skip_binder().def_id;
- let def_kind = tcx.def_kind(assoc_item_def_id);
match (def_kind, term.unpack()) {
- (hir::def::DefKind::AssocTy, ty::TermKind::Ty(_))
- | (hir::def::DefKind::AssocConst, ty::TermKind::Const(_)) => (),
+ (DefKind::AssocTy, ty::TermKind::Ty(_))
+ | (DefKind::AssocConst, ty::TermKind::Const(_)) => (),
(_, _) => {
let got = if let Some(_) = term.ty() { "type" } else { "constant" };
let expected = tcx.def_descr(assoc_item_def_id);
@@ -516,7 +516,7 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
format!("{expected} defined here"),
);
- if let hir::def::DefKind::AssocConst = def_kind
+ if let DefKind::AssocConst = def_kind
&& let Some(t) = term.ty() && (t.is_enum() || t.references_error())
&& tcx.features().associated_const_equality {
err.span_suggestion(
@@ -528,12 +528,12 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
}
let reported = err.emit();
term = match def_kind {
- hir::def::DefKind::AssocTy => Ty::new_error(tcx, reported).into(),
- hir::def::DefKind::AssocConst => ty::Const::new_error(
+ DefKind::AssocTy => Ty::new_error(tcx, reported).into(),
+ DefKind::AssocConst => ty::Const::new_error(
tcx,
reported,
tcx.type_of(assoc_item_def_id)
- .subst(tcx, projection_ty.skip_binder().substs),
+ .instantiate(tcx, projection_ty.skip_binder().args),
)
.into(),
_ => unreachable!(),
@@ -548,6 +548,15 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
);
}
ConvertedBindingKind::Constraint(ast_bounds) => {
+ match def_kind {
+ DefKind::AssocTy => {}
+ _ => {
+ return Err(tcx.sess.emit_err(errors::AssocBoundOnConst {
+ span: assoc_ident.span,
+ descr: tcx.def_descr(assoc_item_def_id),
+ }));
+ }
+ }
// "Desugar" a constraint like `T: Iterator<Item: Debug>` to
//
// `<T as Iterator>::Item: Debug`
diff --git a/compiler/rustc_hir_analysis/src/astconv/errors.rs b/compiler/rustc_hir_analysis/src/astconv/errors.rs
index ddf99853b..bd311c98f 100644
--- a/compiler/rustc_hir_analysis/src/astconv/errors.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/errors.rs
@@ -123,7 +123,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let all_candidate_names: Vec<_> = all_candidates()
.flat_map(|r| self.tcx().associated_items(r.def_id()).in_definition_order())
.filter_map(|item| {
- if item.opt_rpitit_info.is_none() && item.kind == ty::AssocKind::Type {
+ if !item.is_impl_trait_in_trait() && item.kind == ty::AssocKind::Type {
Some(item.name)
} else {
None
@@ -164,7 +164,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
self.tcx().associated_items(*trait_def_id).in_definition_order()
})
.filter_map(|item| {
- if item.opt_rpitit_info.is_none() && item.kind == ty::AssocKind::Type {
+ if !item.is_impl_trait_in_trait() && item.kind == ty::AssocKind::Type {
Some(item.name)
} else {
None
@@ -197,7 +197,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
}
}
- err.span_label(span, format!("associated type `{}` not found", assoc_name));
+ err.span_label(span, format!("associated type `{assoc_name}` not found"));
err.emit()
}
@@ -247,7 +247,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
"the candidate".into()
};
- let impl_ty = tcx.at(span).type_of(impl_).subst_identity();
+ let impl_ty = tcx.at(span).type_of(impl_).instantiate_identity();
let note = format!("{title} is defined in an impl for the type `{impl_ty}`");
if let Some(span) = note_span {
@@ -295,7 +295,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let type_candidates = candidates
.iter()
.take(limit)
- .map(|&(impl_, _)| format!("- `{}`", tcx.at(span).type_of(impl_).subst_identity()))
+ .map(|&(impl_, _)| {
+ format!("- `{}`", tcx.at(span).type_of(impl_).instantiate_identity())
+ })
.collect::<Vec<_>>()
.join("\n");
let additional_types = if candidates.len() > limit {
@@ -356,13 +358,13 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
// `<Foo as Iterator>::Item = String`.
let projection_ty = pred.skip_binder().projection_ty;
- let substs_with_infer_self = tcx.mk_substs_from_iter(
+ let args_with_infer_self = tcx.mk_args_from_iter(
std::iter::once(Ty::new_var(tcx, ty::TyVid::from_u32(0)).into())
- .chain(projection_ty.substs.iter().skip(1)),
+ .chain(projection_ty.args.iter().skip(1)),
);
let quiet_projection_ty =
- tcx.mk_alias_ty(projection_ty.def_id, substs_with_infer_self);
+ tcx.mk_alias_ty(projection_ty.def_id, args_with_infer_self);
let term = pred.skip_binder().term;
@@ -391,7 +393,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
.into_iter()
.map(|error| error.root_obligation.predicate)
.filter_map(format_pred)
- .map(|(p, _)| format!("`{}`", p))
+ .map(|(p, _)| format!("`{p}`"))
.collect();
bounds.sort();
bounds.dedup();
@@ -650,7 +652,7 @@ pub(crate) fn fn_trait_to_string(
}
.map(|s| {
// `s.is_empty()` checks whether the type is the unit tuple; if so, we don't want a comma
- if parenthesized || s.is_empty() { format!("({})", s) } else { format!("({},)", s) }
+ if parenthesized || s.is_empty() { format!("({s})") } else { format!("({s},)") }
})
.ok(),
_ => None,
diff --git a/compiler/rustc_hir_analysis/src/astconv/generics.rs b/compiler/rustc_hir_analysis/src/astconv/generics.rs
index 39d1d1f2d..1372cc896 100644
--- a/compiler/rustc_hir_analysis/src/astconv/generics.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/generics.rs
@@ -11,7 +11,7 @@ use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::GenericArg;
use rustc_middle::ty::{
- self, subst, subst::SubstsRef, GenericParamDef, GenericParamDefKind, IsSuggestable, Ty, TyCtxt,
+ self, GenericArgsRef, GenericParamDef, GenericParamDefKind, IsSuggestable, Ty, TyCtxt,
};
use rustc_session::lint::builtin::LATE_BOUND_LIFETIME_ARGUMENTS;
use rustc_span::{symbol::kw, Span};
@@ -76,12 +76,12 @@ fn generic_arg_mismatch_err(
Res::Def(DefKind::TyParam, src_def_id) => {
if let Some(param_local_id) = param.def_id.as_local() {
let param_name = tcx.hir().ty_param_name(param_local_id);
- let param_type = tcx.type_of(param.def_id).subst_identity();
+ let param_type = tcx.type_of(param.def_id).instantiate_identity();
if param_type.is_suggestable(tcx, false) {
err.span_suggestion(
tcx.def_span(src_def_id),
"consider changing this type parameter to a const parameter",
- format!("const {}: {}", param_name, param_type),
+ format!("const {param_name}: {param_type}"),
Applicability::MaybeIncorrect,
);
};
@@ -102,7 +102,7 @@ fn generic_arg_mismatch_err(
err.span_suggestion(
arg.span(),
"array type provided where a `usize` was expected, try",
- format!("{{ {} }}", snippet),
+ format!("{{ {snippet} }}"),
Applicability::MaybeIncorrect,
);
}
@@ -130,7 +130,7 @@ fn generic_arg_mismatch_err(
} else {
(arg.descr(), param.kind.descr())
};
- err.note(format!("{} arguments must be provided before {} arguments", first, last));
+ err.note(format!("{first} arguments must be provided before {last} arguments"));
if let Some(help) = help {
err.help(help);
}
@@ -146,14 +146,14 @@ fn generic_arg_mismatch_err(
///
/// To start, we are given the `def_id` of the thing we are
/// creating the substitutions for, and a partial set of
-/// substitutions `parent_substs`. In general, the substitutions
+/// substitutions `parent_args`. In general, the substitutions
/// for an item begin with substitutions for all the "parents" of
/// that item -- e.g., for a method it might include the
/// parameters from the impl.
///
/// Therefore, the method begins by walking down these parents,
/// starting with the outermost parent and proceed inwards until
-/// it reaches `def_id`. For each parent `P`, it will check `parent_substs`
+/// it reaches `def_id`. For each parent `P`, it will check `parent_args`
/// first to see if the parent's substitutions are listed in there. If so,
/// we can append those and move on. Otherwise, it invokes the
/// three callback functions:
@@ -168,15 +168,15 @@ fn generic_arg_mismatch_err(
/// instantiate a `GenericArg`.
/// - `inferred_kind`: if no parameter was provided, and inference is enabled, then
/// creates a suitable inference variable.
-pub fn create_substs_for_generic_args<'tcx, 'a>(
+pub fn create_args_for_parent_generic_args<'tcx, 'a>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- parent_substs: &[subst::GenericArg<'tcx>],
+ parent_args: &[ty::GenericArg<'tcx>],
has_self: bool,
self_ty: Option<Ty<'tcx>>,
arg_count: &GenericArgCountResult,
ctx: &mut impl CreateSubstsForGenericArgsCtxt<'a, 'tcx>,
-) -> SubstsRef<'tcx> {
+) -> GenericArgsRef<'tcx> {
// Collect the segments of the path; we need to substitute arguments
// for parameters throughout the entire path (wherever there are
// generic parameters).
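
The doc comment above describes the overall strategy: walk the parameters of each parent in order, reuse whatever `parent_args` already supplies, and fall back to the callbacks for anything still missing. A much-simplified, self-contained sketch of that fill-in-order loop (plain integers stand in for generic arguments):

fn build_args(
    parent_args: &[u32],
    total_params: usize,
    mut inferred: impl FnMut(usize) -> u32,
) -> Vec<u32> {
    let mut args = Vec::with_capacity(total_params);
    for index in 0..total_params {
        match parent_args.get(index) {
            Some(&arg) => args.push(arg),       // already provided by the parent
            None => args.push(inferred(index)), // infer the rest via the callback
        }
    }
    args
}

fn main() {
    // Two parent-supplied arguments, two inferred ones.
    let args = build_args(&[10, 20], 4, |i| (i as u32) * 100);
    assert_eq!(args, vec![10, 20, 200, 300]);
}
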
@@ -191,27 +191,27 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
// We manually build up the substitution, rather than using convenience
// methods in `subst.rs`, so that we can iterate over the arguments and
// parameters in lock-step linearly, instead of trying to match each pair.
- let mut substs: SmallVec<[subst::GenericArg<'tcx>; 8]> = SmallVec::with_capacity(count);
+ let mut args: SmallVec<[ty::GenericArg<'tcx>; 8]> = SmallVec::with_capacity(count);
// Iterate over each segment of the path.
while let Some((def_id, defs)) = stack.pop() {
let mut params = defs.params.iter().peekable();
// If we have already computed substitutions for parents, we can use those directly.
while let Some(&param) = params.peek() {
- if let Some(&kind) = parent_substs.get(param.index as usize) {
- substs.push(kind);
+ if let Some(&kind) = parent_args.get(param.index as usize) {
+ args.push(kind);
params.next();
} else {
break;
}
}
- // `Self` is handled first, unless it's been handled in `parent_substs`.
+ // `Self` is handled first, unless it's been handled in `parent_args`.
if has_self {
if let Some(&param) = params.peek() {
if param.index == 0 {
if let GenericParamDefKind::Type { .. } = param.kind {
- substs.push(
+ args.push(
self_ty
.map(|ty| ty.into())
.unwrap_or_else(|| ctx.inferred_kind(None, param, true)),
@@ -226,7 +226,7 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
let (generic_args, infer_args) = ctx.args_for_def_id(def_id);
let args_iter = generic_args.iter().flat_map(|generic_args| generic_args.args.iter());
- let mut args = args_iter.clone().peekable();
+ let mut args_iter = args_iter.clone().peekable();
// If we encounter a type or const when we expect a lifetime, we infer the lifetimes.
// If we later encounter a lifetime, we know that the arguments were provided in the
@@ -239,7 +239,7 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
// provided, matching them with the generic parameters we expect.
// Mismatches can occur as a result of elided lifetimes, or for malformed
// input. We try to handle both sensibly.
- match (args.peek(), params.peek()) {
+ match (args_iter.peek(), params.peek()) {
(Some(&arg), Some(&param)) => {
match (arg, &param.kind, arg_count.explicit_late_bound) {
(GenericArg::Lifetime(_), GenericParamDefKind::Lifetime, _)
@@ -253,8 +253,8 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
GenericParamDefKind::Const { .. },
_,
) => {
- substs.push(ctx.provided_kind(param, arg));
- args.next();
+ args.push(ctx.provided_kind(param, arg));
+ args_iter.next();
params.next();
}
(
@@ -264,7 +264,7 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
) => {
// We expected a lifetime argument, but got a type or const
// argument. That means we're inferring the lifetimes.
- substs.push(ctx.inferred_kind(None, param, infer_args));
+ args.push(ctx.inferred_kind(None, param, infer_args));
force_infer_lt = Some((arg, param));
params.next();
}
@@ -273,7 +273,7 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
// the presence of explicit late bounds. This is most likely
// due to the presence of the explicit bound so we're just going to
// ignore it.
- args.next();
+ args_iter.next();
}
(_, _, _) => {
// We expected one kind of parameter, but the user provided
@@ -304,7 +304,7 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
"reorder the arguments: {}: `<{}>`",
param_types_present
.into_iter()
- .map(|ord| format!("{}s", ord))
+ .map(|ord| format!("{ord}s"))
.collect::<Vec<String>>()
.join(", then "),
ordered_params
@@ -327,7 +327,7 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
// errors. In this case, we're simply going to ignore the argument
// and any following arguments. The rest of the parameters will be
// inferred.
- while args.next().is_some() {}
+ while args_iter.next().is_some() {}
}
}
}
@@ -360,7 +360,7 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
(None, Some(&param)) => {
// If there are fewer arguments than parameters, it means
// we're inferring the remaining arguments.
- substs.push(ctx.inferred_kind(Some(&substs), param, infer_args));
+ args.push(ctx.inferred_kind(Some(&args), param, infer_args));
params.next();
}
@@ -369,7 +369,7 @@ pub fn create_substs_for_generic_args<'tcx, 'a>(
}
}
- tcx.mk_substs(&substs)
+ tcx.mk_args(&args)
}
/// Checks that the correct number of generic arguments have been provided.
diff --git a/compiler/rustc_hir_analysis/src/astconv/lint.rs b/compiler/rustc_hir_analysis/src/astconv/lint.rs
index 05a3ab63d..1bd1270be 100644
--- a/compiler/rustc_hir_analysis/src/astconv/lint.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/lint.rs
@@ -34,9 +34,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let param_name = generics.params.next_type_param_name(None);
let add_generic_sugg = if let Some(span) = generics.span_for_param_suggestion() {
- (span, format!(", {}: {}", param_name, impl_trait_name))
+ (span, format!(", {param_name}: {impl_trait_name}"))
} else {
- (generics.span, format!("<{}: {}>", param_name, impl_trait_name))
+ (generics.span, format!("<{param_name}: {impl_trait_name}>"))
};
diag.multipart_suggestion(
format!("alternatively use a blanket \
@@ -86,7 +86,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
));
}
- if self_ty.span.edition().rust_2021() {
+ if self_ty.span.edition().at_least_rust_2021() {
let msg = "trait objects must include the `dyn` keyword";
let label = "add `dyn` keyword before this trait";
let mut diag =
diff --git a/compiler/rustc_hir_analysis/src/astconv/mod.rs b/compiler/rustc_hir_analysis/src/astconv/mod.rs
index 3d6984628..668763f9b 100644
--- a/compiler/rustc_hir_analysis/src/astconv/mod.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/mod.rs
@@ -9,7 +9,7 @@ mod lint;
mod object_safety;
use crate::astconv::errors::prohibit_assoc_ty_binding;
-use crate::astconv::generics::{check_generic_arg_count, create_substs_for_generic_args};
+use crate::astconv::generics::{check_generic_arg_count, create_args_for_parent_generic_args};
use crate::bounds::Bounds;
use crate::collect::HirPlaceholderCollector;
use crate::errors::{AmbiguousLifetimeBound, TypeofReservedKeywordUsed};
@@ -29,9 +29,10 @@ use rustc_hir::{GenericArg, GenericArgs, OpaqueTyOrigin};
use rustc_infer::infer::{InferCtxt, InferOk, TyCtxtInferExt};
use rustc_infer::traits::ObligationCause;
use rustc_middle::middle::stability::AllowUnstable;
-use rustc_middle::ty::subst::{self, GenericArgKind, InternalSubsts, SubstsRef};
use rustc_middle::ty::GenericParamDefKind;
-use rustc_middle::ty::{self, Const, IsSuggestable, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{
+ self, Const, GenericArgKind, GenericArgsRef, IsSuggestable, Ty, TyCtxt, TypeVisitableExt,
+};
use rustc_session::lint::builtin::AMBIGUOUS_ASSOCIATED_ITEMS;
use rustc_span::edit_distance::find_best_match_for_name;
use rustc_span::symbol::{kw, Ident, Symbol};
@@ -220,14 +221,14 @@ pub trait CreateSubstsForGenericArgsCtxt<'a, 'tcx> {
&mut self,
param: &ty::GenericParamDef,
arg: &GenericArg<'_>,
- ) -> subst::GenericArg<'tcx>;
+ ) -> ty::GenericArg<'tcx>;
fn inferred_kind(
&mut self,
- substs: Option<&[subst::GenericArg<'tcx>]>,
+ args: Option<&[ty::GenericArg<'tcx>]>,
param: &ty::GenericParamDef,
infer_args: bool,
- ) -> subst::GenericArg<'tcx>;
+ ) -> ty::GenericArg<'tcx>;
}
impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
@@ -291,13 +292,13 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
/// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`,
/// returns an appropriate set of substitutions for this particular reference to `I`.
- pub fn ast_path_substs_for_ty(
+ pub fn ast_path_args_for_ty(
&self,
span: Span,
def_id: DefId,
item_segment: &hir::PathSegment<'_>,
- ) -> SubstsRef<'tcx> {
- let (substs, _) = self.create_substs_for_ast_path(
+ ) -> GenericArgsRef<'tcx> {
+ let (args, _) = self.create_args_for_ast_path(
span,
def_id,
&[],
@@ -311,7 +312,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
prohibit_assoc_ty_binding(self.tcx(), b.span, Some((item_segment, span)));
}
- substs
+ args
}
/// Given the type/lifetime/const arguments provided to some path (along with
@@ -330,7 +331,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
/// 2. The path in question is the path to the trait `std::ops::Index`,
/// which will have been resolved to a `def_id`
/// 3. The `generic_args` contains info on the `<...>` contents. The `usize` type
- /// parameters are returned in the `SubstsRef`, the associated type bindings like
+ /// parameters are returned in the `GenericArgsRef`, the associated type bindings like
/// `Output = u32` are returned from `create_assoc_bindings_for_generic_args`.
///
/// Note that the type listing given here is *exactly* what the user provided.
@@ -341,22 +342,22 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
/// <Vec<u8> as Iterable<u8>>::Iter::<'a>
/// ```
///
- /// We have the parent substs are the substs for the parent trait:
+ /// We have the parent args are the args for the parent trait:
/// `[Vec<u8>, u8]` and `generic_args` are the arguments for the associated
- /// type itself: `['a]`. The returned `SubstsRef` concatenates these two
+ /// type itself: `['a]`. The returned `GenericArgsRef` concatenates these two
/// lists: `[Vec<u8>, u8, 'a]`.
#[instrument(level = "debug", skip(self, span), ret)]
- fn create_substs_for_ast_path<'a>(
+ fn create_args_for_ast_path<'a>(
&self,
span: Span,
def_id: DefId,
- parent_substs: &[subst::GenericArg<'tcx>],
+ parent_args: &[ty::GenericArg<'tcx>],
seg: &hir::PathSegment<'_>,
generic_args: &'a hir::GenericArgs<'_>,
infer_args: bool,
self_ty: Option<Ty<'tcx>>,
constness: ty::BoundConstness,
- ) -> (SubstsRef<'tcx>, GenericArgCountResult) {
+ ) -> (GenericArgsRef<'tcx>, GenericArgCountResult) {
// If the type is parameterized by this region, then replace this
// region with the current anon region binding (in other words,
// whatever & would get replaced with).
@@ -369,7 +370,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
if generics.parent.is_some() {
// The parent is a trait so it should have at least one subst
// for the `Self` type.
- assert!(!parent_substs.is_empty())
+ assert!(!parent_args.is_empty())
} else {
// This item (presumably a trait) needs a self-type.
assert!(self_ty.is_some());
@@ -395,7 +396,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
// here and so associated type bindings will be handled regardless of whether there are any
// non-`Self` generic parameters.
if generics.params.is_empty() {
- return (tcx.mk_substs(parent_substs), arg_count);
+ return (tcx.mk_args(parent_args), arg_count);
}
struct SubstsForAstPathCtxt<'a, 'tcx> {
@@ -421,7 +422,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
&mut self,
param: &ty::GenericParamDef,
arg: &GenericArg<'_>,
- ) -> subst::GenericArg<'tcx> {
+ ) -> ty::GenericArg<'tcx> {
let tcx = self.astconv.tcx();
let mut handle_ty_args = |has_default, ty: &hir::Ty<'_>| {
@@ -483,10 +484,10 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
fn inferred_kind(
&mut self,
- substs: Option<&[subst::GenericArg<'tcx>]>,
+ args: Option<&[ty::GenericArg<'tcx>]>,
param: &ty::GenericParamDef,
infer_args: bool,
- ) -> subst::GenericArg<'tcx> {
+ ) -> ty::GenericArg<'tcx> {
let tcx = self.astconv.tcx();
match param.kind {
GenericParamDefKind::Lifetime => self
@@ -506,15 +507,15 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
GenericParamDefKind::Type { has_default, .. } => {
if !infer_args && has_default {
// No type parameter provided, but a default exists.
- let substs = substs.unwrap();
- if substs.iter().any(|arg| match arg.unpack() {
+ let args = args.unwrap();
+ if args.iter().any(|arg| match arg.unpack() {
GenericArgKind::Type(ty) => ty.references_error(),
_ => false,
}) {
// Avoid ICE #86756 when type error recovery goes awry.
return Ty::new_misc_error(tcx).into();
}
- tcx.at(self.span).type_of(param.def_id).subst(tcx, substs).into()
+ tcx.at(self.span).type_of(param.def_id).instantiate(tcx, args).into()
} else if infer_args {
self.astconv.ty_infer(Some(param), self.span).into()
} else {
@@ -531,8 +532,11 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
if let Err(guar) = ty.error_reported() {
return ty::Const::new_error(tcx, guar, ty).into();
}
+ // FIXME(effects) see if we should special case effect params here
if !infer_args && has_default {
- tcx.const_param_default(param.def_id).subst(tcx, substs.unwrap()).into()
+ tcx.const_param_default(param.def_id)
+ .instantiate(tcx, args.unwrap())
+ .into()
} else {
if infer_args {
self.astconv.ct_infer(ty, Some(param), self.span).into()
@@ -546,7 +550,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
}
}
- let mut substs_ctx = SubstsForAstPathCtxt {
+ let mut args_ctx = SubstsForAstPathCtxt {
astconv: self,
def_id,
span,
@@ -554,14 +558,14 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
inferred_params: vec![],
infer_args,
};
- let substs = create_substs_for_generic_args(
+ let args = create_args_for_parent_generic_args(
tcx,
def_id,
- parent_substs,
+ parent_args,
self_ty.is_some(),
self_ty,
&arg_count,
- &mut substs_ctx,
+ &mut args_ctx,
);
if let ty::BoundConstness::ConstIfConst = constness
@@ -570,7 +574,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
tcx.sess.emit_err(crate::errors::ConstBoundForNonConstTrait { span } );
}
- (substs, arg_count)
+ (args, arg_count)
}
fn create_assoc_bindings_for_generic_args<'a>(
@@ -617,21 +621,21 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
assoc_bindings
}
- pub fn create_substs_for_associated_item(
+ pub fn create_args_for_associated_item(
&self,
span: Span,
item_def_id: DefId,
item_segment: &hir::PathSegment<'_>,
- parent_substs: SubstsRef<'tcx>,
- ) -> SubstsRef<'tcx> {
+ parent_args: GenericArgsRef<'tcx>,
+ ) -> GenericArgsRef<'tcx> {
debug!(
- "create_substs_for_associated_item(span: {:?}, item_def_id: {:?}, item_segment: {:?}",
+ "create_args_for_associated_item(span: {:?}, item_def_id: {:?}, item_segment: {:?}",
span, item_def_id, item_segment
);
- let (args, _) = self.create_substs_for_ast_path(
+ let (args, _) = self.create_args_for_ast_path(
span,
item_def_id,
- parent_substs,
+ parent_args,
item_segment,
item_segment.args(),
item_segment.infer_args,
@@ -656,7 +660,6 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
&self,
trait_ref: &hir::TraitRef<'_>,
self_ty: Ty<'tcx>,
- constness: ty::BoundConstness,
) -> ty::TraitRef<'tcx> {
self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1.iter(), |_| {});
@@ -666,7 +669,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
self_ty,
trait_ref.path.segments.last().unwrap(),
true,
- constness,
+ ty::BoundConstness::NotConst,
)
}
@@ -687,7 +690,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
self_ty: Ty<'tcx>,
only_self_bounds: OnlySelfBounds,
) -> GenericArgCountResult {
- let (substs, arg_count) = self.create_substs_for_ast_path(
+ let (generic_args, arg_count) = self.create_args_for_ast_path(
trait_ref_span,
trait_def_id,
&[],
@@ -704,11 +707,13 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let assoc_bindings = self.create_assoc_bindings_for_generic_args(args);
- let poly_trait_ref =
- ty::Binder::bind_with_vars(ty::TraitRef::new(tcx, trait_def_id, substs), bound_vars);
+ let poly_trait_ref = ty::Binder::bind_with_vars(
+ ty::TraitRef::new(tcx, trait_def_id, generic_args),
+ bound_vars,
+ );
debug!(?poly_trait_ref, ?assoc_bindings);
- bounds.push_trait_bound(tcx, poly_trait_ref, span, constness, polarity);
+ bounds.push_trait_bound(tcx, poly_trait_ref, span, polarity);
let mut dup_bindings = FxHashMap::default();
for binding in &assoc_bindings {
@@ -844,9 +849,10 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
self_ty: Ty<'tcx>,
trait_segment: &hir::PathSegment<'_>,
is_impl: bool,
+ // FIXME(effects) move all host param things in astconv to hir lowering
constness: ty::BoundConstness,
) -> ty::TraitRef<'tcx> {
- let (substs, _) = self.create_substs_for_ast_trait_ref(
+ let (generic_args, _) = self.create_args_for_ast_trait_ref(
span,
trait_def_id,
self_ty,
@@ -857,11 +863,11 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
if let Some(b) = trait_segment.args().bindings.first() {
prohibit_assoc_ty_binding(self.tcx(), b.span, Some((trait_segment, span)));
}
- ty::TraitRef::new(self.tcx(), trait_def_id, substs)
+ ty::TraitRef::new(self.tcx(), trait_def_id, generic_args)
}
#[instrument(level = "debug", skip(self, span))]
- fn create_substs_for_ast_trait_ref<'a>(
+ fn create_args_for_ast_trait_ref<'a>(
&self,
span: Span,
trait_def_id: DefId,
@@ -869,10 +875,10 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
trait_segment: &'a hir::PathSegment<'a>,
is_impl: bool,
constness: ty::BoundConstness,
- ) -> (SubstsRef<'tcx>, GenericArgCountResult) {
+ ) -> (GenericArgsRef<'tcx>, GenericArgCountResult) {
self.complain_about_internal_fn_trait(span, trait_def_id, trait_segment, is_impl);
- self.create_substs_for_ast_path(
+ self.create_args_for_ast_path(
span,
trait_def_id,
&[],
@@ -902,19 +908,21 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
did: DefId,
item_segment: &hir::PathSegment<'_>,
) -> Ty<'tcx> {
- let substs = self.ast_path_substs_for_ty(span, did, item_segment);
- let ty = self.tcx().at(span).type_of(did);
+ let tcx = self.tcx();
+ let args = self.ast_path_args_for_ty(span, did, item_segment);
+ let ty = tcx.at(span).type_of(did);
- if matches!(self.tcx().def_kind(did), DefKind::TyAlias)
- && (ty.skip_binder().has_opaque_types() || self.tcx().features().lazy_type_alias)
+ if let DefKind::TyAlias { lazy } = tcx.def_kind(did)
+ && (lazy || ty.skip_binder().has_opaque_types())
{
// Type aliases referring to types that contain opaque types (but aren't just directly
- // referencing a single opaque type) get encoded as a type alias that normalization will
+ // referencing a single opaque type) as well as those defined in crates that have the
+ // feature `lazy_type_alias` enabled get encoded as a type alias that normalization will
// then actually instantiate the where bounds of.
- let alias_ty = self.tcx().mk_alias_ty(did, substs);
- Ty::new_alias(self.tcx(), ty::Weak, alias_ty)
+ let alias_ty = tcx.mk_alias_ty(did, args);
+ Ty::new_alias(tcx, ty::Weak, alias_ty)
} else {
- ty.subst(self.tcx(), substs)
+ ty.instantiate(tcx, args)
}
}
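A rough user-level sketch (not from the patch) of the alias shape the branch above now lowers as a `ty::Weak` alias; it assumes a nightly toolchain with the unstable `type_alias_impl_trait` gate, and the names `Pair` and `define` are made up:

#![feature(type_alias_impl_trait)]

// The alias body merely contains an opaque type rather than being a single
// opaque type, so it is lowered as a weak alias and its where-bounds are
// instantiated later, during normalization.
type Pair = (impl Sized, u8);

// Defining use: constrains the opaque component of `Pair` to `u32`.
fn define() -> Pair {
    (0u32, 1u8)
}

Per the widened condition above, ordinary aliases in crates that enable the `lazy_type_alias` feature take the same lazily-normalized path even when no opaque type appears in their body.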
@@ -1123,7 +1131,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
ty_param_name
)
};
- err.span_label(span, format!("ambiguous associated type `{}`", assoc_name));
+ err.span_label(span, format!("ambiguous associated type `{assoc_name}`"));
let mut where_bounds = vec![];
for bound in bounds {
@@ -1267,9 +1275,12 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
"you might have meant to specify type parameters on enum \
`{type_name}`"
);
- let Some(args) = assoc_segment.args else { return; };
+ let Some(args) = assoc_segment.args else {
+ return;
+ };
// Get the span of the generics args *including* the leading `::`.
- let args_span = assoc_segment.ident.span.shrink_to_hi().to(args.span_ext);
+ let args_span =
+ assoc_segment.ident.span.shrink_to_hi().to(args.span_ext);
if tcx.generics_of(adt_def.did()).count() == 0 {
// FIXME(estebank): we could also verify that the arguments being used
// work for the `enum`, instead of just looking if it takes *any*.
@@ -1281,49 +1292,56 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
);
return;
}
- let Ok(snippet) = tcx.sess.source_map().span_to_snippet(args_span) else {
+ let Ok(snippet) = tcx.sess.source_map().span_to_snippet(args_span)
+ else {
err.note(msg);
return;
};
- let (qself_sugg_span, is_self) = if let hir::TyKind::Path(
- hir::QPath::Resolved(_, path)
- ) = &qself.kind {
- // If the path segment already has type params, we want to overwrite
- // them.
- match &path.segments {
- // `segment` is the previous to last element on the path,
- // which would normally be the `enum` itself, while the last
- // `_` `PathSegment` corresponds to the variant.
- [.., hir::PathSegment {
- ident,
- args,
- res: Res::Def(DefKind::Enum, _),
- ..
- }, _] => (
- // We need to include the `::` in `Type::Variant::<Args>`
- // to point the span to `::<Args>`, not just `<Args>`.
- ident.span.shrink_to_hi().to(args.map_or(
- ident.span.shrink_to_hi(),
- |a| a.span_ext)),
- false,
- ),
- [segment] => (
- // We need to include the `::` in `Type::Variant::<Args>`
- // to point the span to `::<Args>`, not just `<Args>`.
- segment.ident.span.shrink_to_hi().to(segment.args.map_or(
- segment.ident.span.shrink_to_hi(),
- |a| a.span_ext)),
- kw::SelfUpper == segment.ident.name,
- ),
- _ => {
- err.note(msg);
- return;
+ let (qself_sugg_span, is_self) =
+ if let hir::TyKind::Path(hir::QPath::Resolved(_, path)) =
+ &qself.kind
+ {
+ // If the path segment already has type params, we want to overwrite
+ // them.
+ match &path.segments {
+ // `segment` is the previous to last element on the path,
+ // which would normally be the `enum` itself, while the last
+ // `_` `PathSegment` corresponds to the variant.
+ [
+ ..,
+ hir::PathSegment {
+ ident,
+ args,
+ res: Res::Def(DefKind::Enum, _),
+ ..
+ },
+ _,
+ ] => (
+ // We need to include the `::` in `Type::Variant::<Args>`
+ // to point the span to `::<Args>`, not just `<Args>`.
+ ident.span.shrink_to_hi().to(args
+ .map_or(ident.span.shrink_to_hi(), |a| a.span_ext)),
+ false,
+ ),
+ [segment] => (
+ // We need to include the `::` in `Type::Variant::<Args>`
+ // to point the span to `::<Args>`, not just `<Args>`.
+ segment.ident.span.shrink_to_hi().to(segment
+ .args
+ .map_or(segment.ident.span.shrink_to_hi(), |a| {
+ a.span_ext
+ })),
+ kw::SelfUpper == segment.ident.name,
+ ),
+ _ => {
+ err.note(msg);
+ return;
+ }
}
- }
- } else {
- err.note(msg);
- return;
- };
+ } else {
+ err.note(msg);
+ return;
+ };
let suggestion = vec![
if is_self {
// Account for people writing `Self::Variant::<Args>`, where
@@ -1373,7 +1391,12 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
};
self.one_bound_for_assoc_type(
- || traits::supertraits(tcx, ty::Binder::dummy(trait_ref.subst_identity())),
+ || {
+ traits::supertraits(
+ tcx,
+ ty::Binder::dummy(trait_ref.instantiate_identity()),
+ )
+ },
kw::SelfUpper,
assoc_ident,
span,
@@ -1387,7 +1410,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
_ => {
let reported = if variant_resolution.is_some() {
// Variant in type position
- let msg = format!("expected type, found variant `{}`", assoc_ident);
+ let msg = format!("expected type, found variant `{assoc_ident}`");
tcx.sess.span_err(span, msg)
} else if qself_ty.is_enum() {
let mut err = struct_span_err!(
@@ -1418,12 +1441,12 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
} else {
err.span_label(
assoc_ident.span,
- format!("variant not found in `{}`", qself_ty),
+ format!("variant not found in `{qself_ty}`"),
);
}
if let Some(sp) = tcx.hir().span_if_local(adt_def.did()) {
- err.span_label(sp, format!("variant `{}` not found here", assoc_ident));
+ err.span_label(sp, format!("variant `{assoc_ident}` not found here"));
}
err.emit()
@@ -1442,7 +1465,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let traits: Vec<_> =
self.probe_traits_that_match_assoc_ty(qself_ty, assoc_ident);
- // Don't print `TyErr` to the user.
+ // Don't print `ty::Error` to the user.
self.report_ambiguous_associated_type(
span,
&[qself_ty.to_string()],
@@ -1455,7 +1478,8 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
};
let trait_did = bound.def_id();
- let Some(assoc_ty_did) = self.lookup_assoc_ty(assoc_ident, hir_ref_id, span, trait_did) else {
+ let Some(assoc_ty_did) = self.lookup_assoc_ty(assoc_ident, hir_ref_id, span, trait_did)
+ else {
// Assume that if it's not matched, there must be a const defined with the same name
// but it was used in a type position.
let msg = format!("found associated const `{assoc_ident}` when type was expected");
@@ -1609,8 +1633,8 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let ocx = ObligationCtxt::new(&infcx);
ocx.register_obligations(obligations.clone());
- let impl_substs = infcx.fresh_substs_for_item(span, impl_);
- let impl_ty = tcx.type_of(impl_).subst(tcx, impl_substs);
+ let impl_args = infcx.fresh_args_for_item(span, impl_);
+ let impl_ty = tcx.type_of(impl_).instantiate(tcx, impl_args);
let impl_ty = ocx.normalize(&cause, param_env, impl_ty);
// Check that the self types can be related.
@@ -1622,7 +1646,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
}
// Check whether the impl imposes obligations we have to worry about.
- let impl_bounds = tcx.predicates_of(impl_).instantiate(tcx, impl_substs);
+ let impl_bounds = tcx.predicates_of(impl_).instantiate(tcx, impl_args);
let impl_bounds = ocx.normalize(&cause, param_env, impl_bounds);
let impl_obligations = traits::predicates_for_generics(
|_, _| cause.clone(),
@@ -1654,18 +1678,17 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
if let Some((impl_, (assoc_item, def_scope))) = applicable_candidates.pop() {
self.check_assoc_ty(assoc_item, name, def_scope, block, span);
- // FIXME(fmease): Currently creating throwaway `parent_substs` to please
- // `create_substs_for_associated_item`. Modify the latter instead (or sth. similar) to
- // not require the parent substs logic.
- let parent_substs = InternalSubsts::identity_for_item(tcx, impl_);
- let substs =
- self.create_substs_for_associated_item(span, assoc_item, segment, parent_substs);
- let substs = tcx.mk_substs_from_iter(
+ // FIXME(fmease): Currently creating throwaway `parent_args` to please
+ // `create_args_for_associated_item`. Modify the latter instead (or sth. similar) to
+ // not require the parent args logic.
+ let parent_args = ty::GenericArgs::identity_for_item(tcx, impl_);
+ let args = self.create_args_for_associated_item(span, assoc_item, segment, parent_args);
+ let args = tcx.mk_args_from_iter(
std::iter::once(ty::GenericArg::from(self_ty))
- .chain(substs.into_iter().skip(parent_substs.len())),
+ .chain(args.into_iter().skip(parent_args.len())),
);
- let ty = Ty::new_alias(tcx, ty::Inherent, tcx.mk_alias_ty(assoc_item, substs));
+ let ty = Ty::new_alias(tcx, ty::Inherent, tcx.mk_alias_ty(assoc_item, args));
return Ok(Some((ty, assoc_item)));
}
@@ -1769,9 +1792,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
.any(|impl_def_id| {
let trait_ref = tcx.impl_trait_ref(impl_def_id);
trait_ref.is_some_and(|trait_ref| {
- let impl_ = trait_ref.subst(
+ let impl_ = trait_ref.instantiate(
tcx,
- infcx.fresh_substs_for_item(DUMMY_SP, impl_def_id),
+ infcx.fresh_args_for_item(DUMMY_SP, impl_def_id),
);
let value = tcx.fold_regions(qself_ty, |_, _| tcx.lifetimes.re_erased);
// FIXME: Don't bother dealing with non-lifetime binders here...
@@ -1814,7 +1837,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
debug!("qpath_to_ty: self.item_def_id()={:?}", def_id);
- let parent_def_id = def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
+ let parent_def_id = def_id
+ .as_local()
+ .map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
.map(|hir_id| tcx.hir().get_parent_item(hir_id).to_def_id());
debug!("qpath_to_ty: parent_def_id={:?}", parent_def_id);
@@ -1835,7 +1860,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
&& tcx.impl_polarity(impl_def_id) != ty::ImplPolarity::Negative
})
.filter_map(|impl_def_id| tcx.impl_trait_ref(impl_def_id))
- .map(|impl_| impl_.subst_identity().self_ty())
+ .map(|impl_| impl_.instantiate_identity().self_ty())
// We don't care about blanket impls.
.filter(|self_ty| !self_ty.has_non_region_param())
.map(|self_ty| tcx.erase_regions(self_ty).to_string())
@@ -1850,7 +1875,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
&[path_str],
item_segment.ident.name,
);
- return Ty::new_error(tcx,reported)
+ return Ty::new_error(tcx, reported);
};
debug!("qpath_to_ty: self_type={:?}", self_ty);
@@ -1864,16 +1889,12 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
constness,
);
- let item_substs = self.create_substs_for_associated_item(
- span,
- item_def_id,
- item_segment,
- trait_ref.substs,
- );
+ let item_args =
+ self.create_args_for_associated_item(span, item_def_id, item_segment, trait_ref.args);
debug!("qpath_to_ty: trait_ref={:?}", trait_ref);
- Ty::new_projection(tcx, item_def_id, item_substs)
+ Ty::new_projection(tcx, item_def_id, item_args)
}
pub fn prohibit_generics<'a>(
@@ -2128,19 +2149,19 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let span = path.span;
match path.res {
- Res::Def(DefKind::OpaqueTy | DefKind::ImplTraitPlaceholder, did) => {
+ Res::Def(DefKind::OpaqueTy, did) => {
// Check for desugared `impl Trait`.
assert!(tcx.is_type_alias_impl_trait(did));
let item_segment = path.segments.split_last().unwrap();
self.prohibit_generics(item_segment.1.iter(), |err| {
err.note("`impl Trait` types can't have type parameters");
});
- let substs = self.ast_path_substs_for_ty(span, did, item_segment.0);
- Ty::new_opaque(tcx, did, substs)
+ let args = self.ast_path_args_for_ty(span, did, item_segment.0);
+ Ty::new_opaque(tcx, did, args)
}
Res::Def(
DefKind::Enum
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::Struct
| DefKind::Union
| DefKind::ForeignTy,
@@ -2220,7 +2241,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
// `Self` in impl (we know the concrete type).
assert_eq!(opt_self_ty, None);
// Try to evaluate any array length constants.
- let ty = tcx.at(span).type_of(def_id).subst_identity();
+ let ty = tcx.at(span).type_of(def_id).instantiate_identity();
let span_of_impl = tcx.span_of_impl(def_id);
self.prohibit_generics(path.segments.iter(), |err| {
let def_id = match *ty.kind() {
@@ -2439,7 +2460,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
// If this is an RPITIT and we are using the new RPITIT lowering scheme, we
// generate the def_id of an associated type for the trait and return as
// type a projection.
- let def_id = if in_trait && tcx.lower_impl_trait_in_trait_to_assoc_ty() {
+ let def_id = if in_trait {
tcx.associated_type_for_impl_trait_in_trait(local_def_id).to_def_id()
} else {
local_def_id.to_def_id()
@@ -2458,7 +2479,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
}
&hir::TyKind::Path(hir::QPath::LangItem(lang_item, span, _)) => {
let def_id = tcx.require_lang_item(lang_item, Some(span));
- let (substs, _) = self.create_substs_for_ast_path(
+ let (args, _) = self.create_args_for_ast_path(
span,
def_id,
&[],
@@ -2468,7 +2489,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
None,
ty::BoundConstness::NotConst,
);
- tcx.at(span).type_of(def_id).subst(tcx, substs)
+ tcx.at(span).type_of(def_id).instantiate(tcx, args)
}
hir::TyKind::Array(ty, length) => {
let length = match length {
@@ -2481,7 +2502,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
Ty::new_array_with_const_len(tcx, self.ast_ty_to_ty(ty), length)
}
hir::TyKind::Typeof(e) => {
- let ty_erased = tcx.type_of(e.def_id).subst_identity();
+ let ty_erased = tcx.type_of(e.def_id).instantiate_identity();
let ty = tcx.fold_regions(ty_erased, |r, _| {
if r.is_erased() { tcx.lifetimes.re_static } else { r }
});
@@ -2523,7 +2544,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let generics = tcx.generics_of(def_id);
debug!("impl_trait_ty_to_ty: generics={:?}", generics);
- let substs = InternalSubsts::for_item(tcx, def_id, |param, _| {
+ let args = ty::GenericArgs::for_item(tcx, def_id, |param, _| {
// We use `generics.count() - lifetimes.len()` here instead of `generics.parent_count`
// since return-position impl trait in trait squashes all of the generics from its source fn
// into its own generics, so the opaque's "own" params aren't always just lifetimes.
@@ -2537,12 +2558,12 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
tcx.mk_param_from_def(param)
}
});
- debug!("impl_trait_ty_to_ty: substs={:?}", substs);
+ debug!("impl_trait_ty_to_ty: args={:?}", args);
if in_trait {
- Ty::new_projection(tcx, def_id, substs)
+ Ty::new_projection(tcx, def_id, args)
} else {
- Ty::new_opaque(tcx, def_id, substs)
+ Ty::new_opaque(tcx, def_id, args)
}
}
@@ -2688,14 +2709,14 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let hir = tcx.hir();
let hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), ident, .. }) =
- hir.get(fn_hir_id) else { return None };
+ hir.get(fn_hir_id)
+ else {
+ return None;
+ };
let i = hir.get_parent(fn_hir_id).expect_item().expect_impl();
- let trait_ref = self.instantiate_mono_trait_ref(
- i.of_trait.as_ref()?,
- self.ast_ty_to_ty(i.self_ty),
- ty::BoundConstness::NotConst,
- );
+ let trait_ref =
+ self.instantiate_mono_trait_ref(i.of_trait.as_ref()?, self.ast_ty_to_ty(i.self_ty));
let assoc = tcx.associated_items(trait_ref.def_id).find_by_name_and_kind(
tcx,
@@ -2704,9 +2725,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
trait_ref.def_id,
)?;
- let fn_sig = tcx.fn_sig(assoc.def_id).subst(
+ let fn_sig = tcx.fn_sig(assoc.def_id).instantiate(
tcx,
- trait_ref.substs.extend_to(tcx, assoc.def_id, |param, _| tcx.mk_param_from_def(param)),
+ trait_ref.args.extend_to(tcx, assoc.def_id, |param, _| tcx.mk_param_from_def(param)),
);
let fn_sig = tcx.liberate_late_bound_regions(fn_hir_id.expect_owner().to_def_id(), fn_sig);
@@ -2729,7 +2750,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(..) | ty::BrEnv => {
"an anonymous lifetime".to_string()
}
- ty::BrNamed(_, name) => format!("lifetime `{}`", name),
+ ty::BrNamed(_, name) => format!("lifetime `{name}`"),
};
let mut err = generate_err(&br_name);
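A minimal snippet (not from the patch) of the shape the reformatted suggestion above targets: generic arguments attached to a variant segment in type position, where the help nudges the user toward putting them on the enum itself; `Either` and `demo` are made-up names:

enum Either<T> {
    Left(T),
    Right(T),
}

// Rejected: a variant is not a type ("expected type, found variant `Left`").
// Because the `<u8>` sits on the variant segment, the suggestion machinery
// above proposes moving it onto the enum, i.e. `Either::<u8>`.
fn demo(_: Either::Left::<u8>) {}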
diff --git a/compiler/rustc_hir_analysis/src/astconv/object_safety.rs b/compiler/rustc_hir_analysis/src/astconv/object_safety.rs
index 9227ee934..30c2ab8f5 100644
--- a/compiler/rustc_hir_analysis/src/astconv/object_safety.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/object_safety.rs
@@ -62,11 +62,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
match bound_pred.skip_binder() {
ty::ClauseKind::Trait(trait_pred) => {
assert_eq!(trait_pred.polarity, ty::ImplPolarity::Positive);
- trait_bounds.push((
- bound_pred.rebind(trait_pred.trait_ref),
- span,
- trait_pred.constness,
- ));
+ trait_bounds.push((bound_pred.rebind(trait_pred.trait_ref), span));
}
ty::ClauseKind::Projection(proj) => {
projection_bounds.push((bound_pred.rebind(proj), span));
@@ -86,7 +82,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
// Expand trait aliases recursively and check that only one regular (non-auto) trait
// is used and no 'maybe' bounds are used.
let expanded_traits =
- traits::expand_trait_aliases(tcx, trait_bounds.iter().map(|&(a, b, _)| (a, b)));
+ traits::expand_trait_aliases(tcx, trait_bounds.iter().map(|&(a, b)| (a, b)));
let (mut auto_traits, regular_traits): (Vec<_>, Vec<_>) = expanded_traits
.filter(|i| i.trait_ref().self_ty().skip_binder() == dummy_self)
@@ -126,7 +122,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
if regular_traits.is_empty() && auto_traits.is_empty() {
let trait_alias_span = trait_bounds
.iter()
- .map(|&(trait_ref, _, _)| trait_ref.def_id())
+ .map(|&(trait_ref, _)| trait_ref.def_id())
.find(|&trait_ref| tcx.is_trait_alias(trait_ref))
.map(|trait_ref| tcx.def_span(trait_ref));
let reported =
@@ -157,10 +153,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let regular_traits_refs_spans = trait_bounds
.into_iter()
- .filter(|(trait_ref, _, _)| !tcx.trait_is_auto(trait_ref.def_id()));
+ .filter(|(trait_ref, _)| !tcx.trait_is_auto(trait_ref.def_id()));
- for (base_trait_ref, span, constness) in regular_traits_refs_spans {
- assert_eq!(constness, ty::BoundConstness::NotConst);
+ for (base_trait_ref, span) in regular_traits_refs_spans {
let base_pred: ty::Predicate<'tcx> = base_trait_ref.to_predicate(tcx);
for pred in traits::elaborate(tcx, [base_pred]) {
debug!("conv_object_ty_poly_trait_ref: observing object predicate `{:?}`", pred);
@@ -173,7 +168,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
tcx.associated_items(pred.def_id())
.in_definition_order()
.filter(|item| item.kind == ty::AssocKind::Type)
- .filter(|item| item.opt_rpitit_info.is_none())
+ .filter(|item| !item.is_impl_trait_in_trait())
.map(|item| item.def_id),
);
}
@@ -262,8 +257,8 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let mut missing_type_params = vec![];
let mut references_self = false;
let generics = tcx.generics_of(trait_ref.def_id);
- let substs: Vec<_> = trait_ref
- .substs
+ let args: Vec<_> = trait_ref
+ .args
.iter()
.enumerate()
.skip(1) // Remove `Self` for `ExistentialPredicate`.
@@ -279,7 +274,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
arg
})
.collect();
- let substs = tcx.mk_substs(&substs);
+ let args = tcx.mk_args(&args);
let span = i.bottom().1;
let empty_generic_args = hir_trait_bounds.iter().any(|hir_bound| {
@@ -310,7 +305,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
err.emit();
}
- ty::ExistentialTraitRef { def_id: trait_ref.def_id, substs }
+ ty::ExistentialTraitRef { def_id: trait_ref.def_id, args }
})
});
@@ -325,7 +320,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
// Like for trait refs, verify that `dummy_self` did not leak inside default type
// parameters.
- let references_self = b.projection_ty.substs.iter().skip(1).any(|arg| {
+ let references_self = b.projection_ty.args.iter().skip(1).any(|arg| {
if arg.walk().any(|arg| arg == dummy_self.into()) {
return true;
}
@@ -336,9 +331,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
span,
"trait object projection bounds reference `Self`",
);
- let substs: Vec<_> = b
+ let args: Vec<_> = b
.projection_ty
- .substs
+ .args
.iter()
.map(|arg| {
if arg.walk().any(|arg| arg == dummy_self.into()) {
@@ -347,7 +342,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
arg
})
.collect();
- b.projection_ty.substs = tcx.mk_substs(&substs);
+ b.projection_ty.args = tcx.mk_args(&args);
}
ty::ExistentialProjection::erase_self_ty(tcx, b)
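For orientation, a small stable-Rust example (not from the patch) of the trait-object lowering the args handling above feeds into; `sum_bytes` is an invented name:

// Lowering `dyn Iterator<Item = u8>` erases the implicit `Self` parameter
// from both the principal trait ref and the `Item = u8` projection bound,
// which is what the existential trait-ref and projection args built above
// represent.
fn sum_bytes(iter: &mut dyn Iterator<Item = u8>) -> u32 {
    iter.map(u32::from).sum()
}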
diff --git a/compiler/rustc_hir_analysis/src/autoderef.rs b/compiler/rustc_hir_analysis/src/autoderef.rs
index c07ac35cb..39db29504 100644
--- a/compiler/rustc_hir_analysis/src/autoderef.rs
+++ b/compiler/rustc_hir_analysis/src/autoderef.rs
@@ -74,7 +74,7 @@ impl<'a, 'tcx> Iterator for Autoderef<'a, 'tcx> {
// we have some type like `&<Ty as Trait>::Assoc`, since users of
// autoderef expect this type to have been structurally normalized.
if self.infcx.next_trait_solver()
- && let ty::Alias(ty::Projection, _) = ty.kind()
+ && let ty::Alias(ty::Projection | ty::Inherent | ty::Weak, _) = ty.kind()
{
let (normalized_ty, obligations) = self.structurally_normalize(ty)?;
self.state.obligations.extend(obligations);
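A loose sketch (not from the patch) of why inherent alias types can now show up during autoderef; it assumes the unstable `inherent_associated_types` feature and, per the condition above, only matters with the next trait solver enabled:

#![feature(inherent_associated_types)]
#![allow(incomplete_features)]

struct Store;

impl Store {
    // An inherent associated type: `Store::Assoc` is a `ty::Inherent` alias.
    type Assoc = Vec<u8>;
}

// Autoderef starts from `&Store::Assoc`; the alias has to be structurally
// normalized to `Vec<u8>` before method lookup can find `len`, which is what
// the widened match arm above allows.
fn total(bytes: &Store::Assoc) -> usize {
    bytes.len()
}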
diff --git a/compiler/rustc_hir_analysis/src/bounds.rs b/compiler/rustc_hir_analysis/src/bounds.rs
index 531100e1f..1d9ae2b9c 100644
--- a/compiler/rustc_hir_analysis/src/bounds.rs
+++ b/compiler/rustc_hir_analysis/src/bounds.rs
@@ -42,13 +42,12 @@ impl<'tcx> Bounds<'tcx> {
tcx: TyCtxt<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
span: Span,
- constness: ty::BoundConstness,
polarity: ty::ImplPolarity,
) {
self.clauses.push((
trait_ref
.map_bound(|trait_ref| {
- ty::ClauseKind::Trait(ty::TraitPredicate { trait_ref, constness, polarity })
+ ty::ClauseKind::Trait(ty::TraitPredicate { trait_ref, polarity })
})
.to_predicate(tcx),
span,
diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs
index 120545c8e..2c7788498 100644
--- a/compiler/rustc_hir_analysis/src/check/check.rs
+++ b/compiler/rustc_hir_analysis/src/check/check.rs
@@ -8,7 +8,7 @@ use rustc_attr as attr;
use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan};
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, DefKind, Res};
-use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId};
use rustc_hir::intravisit::Visitor;
use rustc_hir::{ItemKind, Node, PathSegment};
use rustc_infer::infer::opaque_types::ConstrainOpaqueTypeRegionVisitor;
@@ -19,11 +19,13 @@ use rustc_lint_defs::builtin::REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS;
use rustc_middle::hir::nested_filter;
use rustc_middle::middle::stability::EvalResult;
use rustc_middle::traits::DefiningAnchor;
+use rustc_middle::ty::fold::BottomUpFolder;
use rustc_middle::ty::layout::{LayoutError, MAX_SIMD_LANES};
-use rustc_middle::ty::subst::GenericArgKind;
use rustc_middle::ty::util::{Discr, IntTypeExt};
+use rustc_middle::ty::GenericArgKind;
use rustc_middle::ty::{
- self, AdtDef, ParamEnv, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt,
+ self, AdtDef, ParamEnv, RegionKind, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable,
+ TypeVisitableExt,
};
use rustc_session::lint::builtin::{UNINHABITED_STATIC, UNSUPPORTED_CALLING_CONVENTIONS};
use rustc_span::symbol::sym;
@@ -34,6 +36,7 @@ use rustc_trait_selection::traits::error_reporting::on_unimplemented::OnUnimplem
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
use rustc_trait_selection::traits::{self, ObligationCtxt, TraitEngine, TraitEngineExt as _};
+use rustc_type_ir::fold::TypeFoldable;
use std::ops::ControlFlow;
@@ -96,8 +99,8 @@ fn check_union(tcx: TyCtxt<'_>, def_id: LocalDefId) {
/// Check that the fields of the `union` do not need dropping.
fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> bool {
- let item_type = tcx.type_of(item_def_id).subst_identity();
- if let ty::Adt(def, substs) = item_type.kind() {
+ let item_type = tcx.type_of(item_def_id).instantiate_identity();
+ if let ty::Adt(def, args) = item_type.kind() {
assert!(def.is_union());
fn allowed_union_field<'tcx>(
@@ -128,7 +131,7 @@ fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> b
let param_env = tcx.param_env(item_def_id);
for field in &def.non_enum_variant().fields {
- let field_ty = tcx.normalize_erasing_regions(param_env, field.ty(tcx, substs));
+ let field_ty = tcx.normalize_erasing_regions(param_env, field.ty(tcx, args));
if !allowed_union_field(field_ty, tcx, param_env) {
let (field_span, ty_span) = match tcx.hir().get_if_local(field.did) {
@@ -163,7 +166,7 @@ fn check_static_inhabited(tcx: TyCtxt<'_>, def_id: LocalDefId) {
// would be enough to check this for `extern` statics, as statics with an initializer will
// have UB during initialization if they are uninhabited, but there also seems to be no good
// reason to allow any statics to be uninhabited.
- let ty = tcx.type_of(def_id).subst_identity();
+ let ty = tcx.type_of(def_id).instantiate_identity();
let span = tcx.def_span(def_id);
let layout = match tcx.layout_of(ParamEnv::reveal_all().and(ty)) {
Ok(l) => l,
@@ -212,16 +215,16 @@ fn check_opaque(tcx: TyCtxt<'_>, id: hir::ItemId) {
return;
}
- let substs = InternalSubsts::identity_for_item(tcx, item.owner_id);
+ let args = GenericArgs::identity_for_item(tcx, item.owner_id);
let span = tcx.def_span(item.owner_id.def_id);
if !tcx.features().impl_trait_projections {
check_opaque_for_inheriting_lifetimes(tcx, item.owner_id.def_id, span);
}
- if tcx.type_of(item.owner_id.def_id).subst_identity().references_error() {
+ if tcx.type_of(item.owner_id.def_id).instantiate_identity().references_error() {
return;
}
- if check_opaque_for_cycles(tcx, item.owner_id.def_id, substs, span, &origin).is_err() {
+ if check_opaque_for_cycles(tcx, item.owner_id.def_id, args, span, &origin).is_err() {
return;
}
@@ -302,16 +305,11 @@ pub(super) fn check_opaque_for_inheriting_lifetimes(
if let ItemKind::OpaqueTy(&hir::OpaqueTy {
origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..),
- in_trait,
..
}) = item.kind
{
- let substs = InternalSubsts::identity_for_item(tcx, def_id);
- let opaque_identity_ty = if in_trait && !tcx.lower_impl_trait_in_trait_to_assoc_ty() {
- Ty::new_projection(tcx, def_id.to_def_id(), substs)
- } else {
- Ty::new_opaque(tcx, def_id.to_def_id(), substs)
- };
+ let args = GenericArgs::identity_for_item(tcx, def_id);
+ let opaque_identity_ty = Ty::new_opaque(tcx, def_id.to_def_id(), args);
let mut visitor = ProhibitOpaqueVisitor {
opaque_identity_ty,
parent_count: tcx.generics_of(def_id).parent_count as u32,
@@ -321,7 +319,7 @@ pub(super) fn check_opaque_for_inheriting_lifetimes(
};
let prohibit_opaque = tcx
.explicit_item_bounds(def_id)
- .subst_identity_iter_copied()
+ .instantiate_identity_iter_copied()
.try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor));
if let Some(ty) = prohibit_opaque.break_value() {
@@ -347,7 +345,7 @@ pub(super) fn check_opaque_for_inheriting_lifetimes(
err.span_suggestion(
span,
"consider spelling out the type instead",
- name.unwrap_or_else(|| format!("{:?}", ty)),
+ name.unwrap_or_else(|| format!("{ty:?}")),
Applicability::MaybeIncorrect,
);
}
@@ -360,11 +358,11 @@ pub(super) fn check_opaque_for_inheriting_lifetimes(
pub(super) fn check_opaque_for_cycles<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: LocalDefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
span: Span,
origin: &hir::OpaqueTyOrigin,
) -> Result<(), ErrorGuaranteed> {
- if tcx.try_expand_impl_trait_type(def_id.to_def_id(), substs).is_err() {
+ if tcx.try_expand_impl_trait_type(def_id.to_def_id(), args).is_err() {
let reported = match origin {
hir::OpaqueTyOrigin::AsyncFn(..) => async_opaque_type_cycle_error(tcx, span),
_ => opaque_type_cycle_error(tcx, def_id, span),
@@ -409,16 +407,26 @@ fn check_opaque_meets_bounds<'tcx>(
.build();
let ocx = ObligationCtxt::new(&infcx);
- let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
- let opaque_ty = Ty::new_opaque(tcx, def_id.to_def_id(), substs);
+ let args = match *origin {
+ hir::OpaqueTyOrigin::FnReturn(parent) | hir::OpaqueTyOrigin::AsyncFn(parent) => {
+ GenericArgs::identity_for_item(tcx, parent).extend_to(
+ tcx,
+ def_id.to_def_id(),
+ |param, _| tcx.map_rpit_lifetime_to_fn_lifetime(param.def_id.expect_local()).into(),
+ )
+ }
+ hir::OpaqueTyOrigin::TyAlias { .. } => GenericArgs::identity_for_item(tcx, def_id),
+ };
+
+ let opaque_ty = Ty::new_opaque(tcx, def_id.to_def_id(), args);
- // `ReErased` regions appear in the "parent_substs" of closures/generators.
+ // `ReErased` regions appear in the "parent_args" of closures/generators.
// We're ignoring them here and replacing them with fresh region variables.
- // See tests in ui/type-alias-impl-trait/closure_{parent_substs,wf_outlives}.rs.
+ // See tests in ui/type-alias-impl-trait/closure_{parent_args,wf_outlives}.rs.
//
// FIXME: Consider wrapping the hidden type in an existential `Binder` and instantiating it
// here rather than using ReErased.
- let hidden_ty = tcx.type_of(def_id.to_def_id()).subst(tcx, substs);
+ let hidden_ty = tcx.type_of(def_id.to_def_id()).instantiate(tcx, args);
let hidden_ty = tcx.fold_regions(hidden_ty, |re, _dbi| match re.kind() {
ty::ReErased => infcx.next_region_var(RegionVariableOrigin::MiscVariable(span)),
_ => re,
@@ -442,7 +450,7 @@ fn check_opaque_meets_bounds<'tcx>(
// hidden type is well formed even without those bounds.
let predicate =
ty::Binder::dummy(ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(hidden_ty.into())));
- ocx.register_obligation(Obligation::new(tcx, misc_cause, param_env, predicate));
+ ocx.register_obligation(Obligation::new(tcx, misc_cause.clone(), param_env, predicate));
// Check that all obligations are satisfied by the implementation's
// version.
@@ -453,7 +461,15 @@ fn check_opaque_meets_bounds<'tcx>(
}
match origin {
// Checked when type checking the function containing them.
- hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..) => {}
+ hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..) => {
+ // HACK: this should also fall through to the hidden type check below, but the original
+ // implementation had a bug where equivalent lifetimes are not identical. This caused us
+ // to reject existing stable code that is otherwise completely fine. The real fix is to
+ // compare the hidden types via our type equivalence/relation infra instead of doing an
+ // identity check.
+ let _ = infcx.take_opaque_types();
+ return Ok(());
+ }
// Nested opaque types occur only in associated types:
// ` type Opaque<T> = impl Trait<&'static T, AssocTy = impl Nested>; `
// They can only be referenced as `<Opaque<T> as Trait<&'static T>>::AssocTy`.
@@ -469,15 +485,63 @@ fn check_opaque_meets_bounds<'tcx>(
ocx.resolve_regions_and_report_errors(defining_use_anchor, &outlives_env)?;
}
}
- // Clean up after ourselves
- let _ = infcx.take_opaque_types();
+ // Check that any hidden types found during wf checking match the hidden types that `type_of` sees.
+ for (mut key, mut ty) in infcx.take_opaque_types() {
+ ty.hidden_type.ty = infcx.resolve_vars_if_possible(ty.hidden_type.ty);
+ key = infcx.resolve_vars_if_possible(key);
+ sanity_check_found_hidden_type(tcx, key, ty.hidden_type)?;
+ }
Ok(())
}
+fn sanity_check_found_hidden_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::OpaqueTypeKey<'tcx>,
+ mut ty: ty::OpaqueHiddenType<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ if ty.ty.is_ty_var() {
+ // Nothing was actually constrained.
+ return Ok(());
+ }
+ if let ty::Alias(ty::Opaque, alias) = ty.ty.kind() {
+ if alias.def_id == key.def_id.to_def_id() && alias.args == key.args {
+ // Nothing was actually constrained, this is an opaque usage that was
+ // only discovered to be opaque after inference vars resolved.
+ return Ok(());
+ }
+ }
+ let strip_vars = |ty: Ty<'tcx>| {
+ ty.fold_with(&mut BottomUpFolder {
+ tcx,
+ ty_op: |t| t,
+ ct_op: |c| c,
+ lt_op: |l| match l.kind() {
+ RegionKind::ReVar(_) => tcx.lifetimes.re_erased,
+ _ => l,
+ },
+ })
+ };
+ // Closures frequently end up containing erased lifetimes in their final representation.
+ // These correspond to lifetime variables that never got resolved, so we patch this up here.
+ ty.ty = strip_vars(ty.ty);
+ // Get the hidden type.
+ let hidden_ty = tcx.type_of(key.def_id).instantiate(tcx, key.args);
+ let hidden_ty = strip_vars(hidden_ty);
+
+ // If the hidden types differ, emit a type mismatch diagnostic.
+ if hidden_ty == ty.ty {
+ Ok(())
+ } else {
+ let span = tcx.def_span(key.def_id);
+ let other = ty::OpaqueHiddenType { ty: hidden_ty, span };
+ Err(ty.report_mismatch(&other, key.def_id, tcx).emit())
+ }
+}
+
fn is_enum_of_nonnullable_ptr<'tcx>(
tcx: TyCtxt<'tcx>,
adt_def: AdtDef<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> bool {
if adt_def.repr().inhibit_enum_layout_opt() {
return false;
@@ -489,14 +553,14 @@ fn is_enum_of_nonnullable_ptr<'tcx>(
let (([], [field]) | ([field], [])) = (&var_one.fields.raw[..], &var_two.fields.raw[..]) else {
return false;
};
- matches!(field.ty(tcx, substs).kind(), ty::FnPtr(..) | ty::Ref(..))
+ matches!(field.ty(tcx, args).kind(), ty::FnPtr(..) | ty::Ref(..))
}
fn check_static_linkage(tcx: TyCtxt<'_>, def_id: LocalDefId) {
if tcx.codegen_fn_attrs(def_id).import_linkage.is_some() {
- if match tcx.type_of(def_id).subst_identity().kind() {
+ if match tcx.type_of(def_id).instantiate_identity().kind() {
ty::RawPtr(_) => false,
- ty::Adt(adt_def, substs) => !is_enum_of_nonnullable_ptr(tcx, *adt_def, *substs),
+ ty::Adt(adt_def, args) => !is_enum_of_nonnullable_ptr(tcx, *adt_def, *args),
_ => true,
} {
tcx.sess.emit_err(LinkageType { span: tcx.def_span(def_id) });
@@ -530,7 +594,7 @@ fn check_item_type(tcx: TyCtxt<'_>, id: hir::ItemId) {
check_impl_items_against_trait(
tcx,
id.owner_id.def_id,
- impl_trait_ref.subst_identity(),
+ impl_trait_ref.instantiate_identity(),
);
check_on_unimplemented(tcx, id);
}
@@ -546,13 +610,13 @@ fn check_item_type(tcx: TyCtxt<'_>, id: hir::ItemId) {
fn_maybe_err(tcx, assoc_item.ident(tcx).span, abi);
}
ty::AssocKind::Type if assoc_item.defaultness(tcx).has_value() => {
- let trait_substs =
- InternalSubsts::identity_for_item(tcx, id.owner_id);
+ let trait_args =
+ GenericArgs::identity_for_item(tcx, id.owner_id);
let _: Result<_, rustc_errors::ErrorGuaranteed> = check_type_bounds(
tcx,
assoc_item,
assoc_item,
- ty::TraitRef::new(tcx, id.owner_id.to_def_id(), trait_substs),
+ ty::TraitRef::new(tcx, id.owner_id.to_def_id(), trait_args),
);
}
_ => {}
@@ -576,19 +640,8 @@ fn check_item_type(tcx: TyCtxt<'_>, id: hir::ItemId) {
check_opaque(tcx, id);
}
}
- DefKind::ImplTraitPlaceholder => {
- let parent = tcx.impl_trait_in_trait_parent_fn(id.owner_id.to_def_id());
- // Only check the validity of this opaque type if the function has a default body
- if let hir::Node::TraitItem(hir::TraitItem {
- kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)),
- ..
- }) = tcx.hir().get_by_def_id(parent.expect_local())
- {
- check_opaque(tcx, id);
- }
- }
- DefKind::TyAlias => {
- let pty_ty = tcx.type_of(id.owner_id).subst_identity();
+ DefKind::TyAlias { .. } => {
+ let pty_ty = tcx.type_of(id.owner_id).instantiate_identity();
let generics = tcx.generics_of(id.owner_id);
check_type_params_are_used(tcx, &generics, pty_ty);
}
@@ -642,7 +695,7 @@ fn check_item_type(tcx: TyCtxt<'_>, id: hir::ItemId) {
"replace the {} parameters with concrete {}{}",
kinds,
kinds_pl,
- egs.map(|egs| format!(" like `{}`", egs)).unwrap_or_default(),
+ egs.map(|egs| format!(" like `{egs}`")).unwrap_or_default(),
),
)
.emit();
@@ -727,7 +780,7 @@ pub(super) fn check_specialization_validity<'tcx>(
} else {
tcx.sess.delay_span_bug(
DUMMY_SP,
- format!("parent item: {:?} not marked as default", parent_impl),
+ format!("parent item: {parent_impl:?} not marked as default"),
);
}
}
@@ -902,8 +955,8 @@ fn check_impl_items_against_trait<'tcx>(
}
pub fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) {
- let t = tcx.type_of(def_id).subst_identity();
- if let ty::Adt(def, substs) = t.kind()
+ let t = tcx.type_of(def_id).instantiate_identity();
+ if let ty::Adt(def, args) = t.kind()
&& def.is_struct()
{
let fields = &def.non_enum_variant().fields;
@@ -911,8 +964,8 @@ pub fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) {
struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
return;
}
- let e = fields[FieldIdx::from_u32(0)].ty(tcx, substs);
- if !fields.iter().all(|f| f.ty(tcx, substs) == e) {
+ let e = fields[FieldIdx::from_u32(0)].ty(tcx, args);
+ if !fields.iter().all(|f| f.ty(tcx, args) == e) {
struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous")
.span_label(sp, "SIMD elements must have the same type")
.emit();
@@ -1019,7 +1072,7 @@ pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: ty::AdtDef<'_>) {
if first {
format!(
"`{}` contains a field of type `{}`",
- tcx.type_of(def.did()).subst_identity(),
+ tcx.type_of(def.did()).instantiate_identity(),
ident
)
} else {
@@ -1041,7 +1094,7 @@ pub(super) fn check_packed_inner(
def_id: DefId,
stack: &mut Vec<DefId>,
) -> Option<Vec<(DefId, Span)>> {
- if let ty::Adt(def, substs) = tcx.type_of(def_id).subst_identity().kind() {
+ if let ty::Adt(def, args) = tcx.type_of(def_id).instantiate_identity().kind() {
if def.is_struct() || def.is_union() {
if def.repr().align.is_some() {
return Some(vec![(def.did(), DUMMY_SP)]);
@@ -1049,7 +1102,7 @@ pub(super) fn check_packed_inner(
stack.push(def_id);
for field in &def.non_enum_variant().fields {
- if let ty::Adt(def, _) = field.ty(tcx, substs).kind()
+ if let ty::Adt(def, _) = field.ty(tcx, args).kind()
&& !stack.contains(&def.did())
&& let Some(mut defs) = check_packed_inner(tcx, def.did(), stack)
{
@@ -1088,21 +1141,21 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
// For each field, figure out if it's known to be a ZST and align(1), with "known"
// respecting #[non_exhaustive] attributes.
let field_infos = adt.all_fields().map(|field| {
- let ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, field.did));
+ let ty = field.ty(tcx, GenericArgs::identity_for_item(tcx, field.did));
let param_env = tcx.param_env(field.did);
let layout = tcx.layout_of(param_env.and(ty));
// We are currently checking the type this field came from, so it must be local
let span = tcx.hir().span_if_local(field.did).unwrap();
let zst = layout.is_ok_and(|layout| layout.is_zst());
- let align1 = layout.is_ok_and(|layout| layout.align.abi.bytes() == 1);
+ let align = layout.ok().map(|layout| layout.align.abi.bytes());
if !zst {
- return (span, zst, align1, None);
+ return (span, zst, align, None);
}
fn check_non_exhaustive<'tcx>(
tcx: TyCtxt<'tcx>,
t: Ty<'tcx>,
- ) -> ControlFlow<(&'static str, DefId, SubstsRef<'tcx>, bool)> {
+ ) -> ControlFlow<(&'static str, DefId, GenericArgsRef<'tcx>, bool)> {
match t.kind() {
ty::Tuple(list) => list.iter().try_for_each(|t| check_non_exhaustive(tcx, t)),
ty::Array(ty, _) => check_non_exhaustive(tcx, *ty),
@@ -1131,12 +1184,12 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
}
}
- (span, zst, align1, check_non_exhaustive(tcx, ty).break_value())
+ (span, zst, align, check_non_exhaustive(tcx, ty).break_value())
});
let non_zst_fields = field_infos
.clone()
- .filter_map(|(span, zst, _align1, _non_exhaustive)| if !zst { Some(span) } else { None });
+ .filter_map(|(span, zst, _align, _non_exhaustive)| if !zst { Some(span) } else { None });
let non_zst_count = non_zst_fields.clone().count();
if non_zst_count >= 2 {
bad_non_zero_sized_fields(tcx, adt, non_zst_count, non_zst_fields, tcx.def_span(adt.did()));
@@ -1144,19 +1197,28 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
let incompatible_zst_fields =
field_infos.clone().filter(|(_, _, _, opt)| opt.is_some()).count();
let incompat = incompatible_zst_fields + non_zst_count >= 2 && non_zst_count < 2;
- for (span, zst, align1, non_exhaustive) in field_infos {
- if zst && !align1 {
- struct_span_err!(
+ for (span, zst, align, non_exhaustive) in field_infos {
+ if zst && align != Some(1) {
+ let mut err = struct_span_err!(
tcx.sess,
span,
E0691,
"zero-sized field in transparent {} has alignment larger than 1",
adt.descr(),
- )
- .span_label(span, "has alignment larger than 1")
- .emit();
+ );
+
+ if let Some(align_bytes) = align {
+ err.span_label(
+ span,
+ format!("has alignment of {align_bytes}, which is larger than 1"),
+ );
+ } else {
+ err.span_label(span, "may have alignment larger than 1");
+ }
+
+ err.emit();
}
- if incompat && let Some((descr, def_id, substs, non_exhaustive)) = non_exhaustive {
+ if incompat && let Some((descr, def_id, args, non_exhaustive)) = non_exhaustive {
tcx.struct_span_lint_hir(
REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
tcx.hir().local_def_id_to_hir_id(adt.did().expect_local()),
@@ -1168,7 +1230,7 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
} else {
"contains private fields"
};
- let field_ty = tcx.def_path_str_with_substs(def_id, substs);
+ let field_ty = tcx.def_path_str_with_args(def_id, args);
lint
.note(format!("this {descr} contains `{field_ty}`, which {note}, \
and makes it not a breaking change to become non-zero-sized in the future."))
@@ -1389,11 +1451,14 @@ pub(super) fn check_type_params_are_used<'tcx>(
}
}
-pub(super) fn check_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+pub(super) fn check_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
let module = tcx.hir_module_items(module_def_id);
for id in module.items() {
check_item_type(tcx, id);
}
+ if module_def_id == LocalModDefId::CRATE_DEF_ID {
+ super::entry::check_for_entry_fn(tcx);
+ }
}
fn async_opaque_type_cycle_error(tcx: TyCtxt<'_>, span: Span) -> ErrorGuaranteed {
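A small example (not from the patch) that reaches the reworked E0691 message above; `Wrapper` is an invented name and the field types are chosen only to produce a zero-sized field whose alignment exceeds 1:

#[repr(transparent)]
struct Wrapper {
    value: f32,
    // `[u64; 0]` is zero-sized but has the alignment of `u64` (8 bytes), so
    // the check above now labels it "has alignment of 8, which is larger
    // than 1" rather than using the old fixed wording.
    _marker: [u64; 0],
}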
diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
index 22e576e34..bd0ab6463 100644
--- a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
+++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
@@ -1,7 +1,7 @@
use super::potentially_plural_count;
use crate::errors::LifetimesOrBoundsMismatchOnTrait;
use hir::def_id::{DefId, LocalDefId};
-use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
use rustc_errors::{
pluralize, struct_span_err, Applicability, DiagnosticId, ErrorGuaranteed, MultiSpan,
};
@@ -16,7 +16,7 @@ use rustc_infer::traits::util;
use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::util::ExplicitSelf;
use rustc_middle::ty::{
- self, InternalSubsts, Ty, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt,
+ self, GenericArgs, Ty, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt,
};
use rustc_middle::ty::{GenericParamDefKind, ToPredicate, TyCtxt};
use rustc_span::{Span, DUMMY_SP};
@@ -76,7 +76,7 @@ fn check_method_is_structurally_compatible<'tcx>(
Ok(())
}
-/// This function is best explained by example. Consider a trait with it's implementation:
+/// This function is best explained by example. Consider a trait with its implementation:
///
/// ```rust
/// trait Trait<'t, T> {
@@ -96,15 +96,15 @@ fn check_method_is_structurally_compatible<'tcx>(
/// For this we have to show that, assuming the bounds of the impl hold, the
/// bounds of `trait_m` imply the bounds of `impl_m`.
///
-/// We start out with `trait_to_impl_substs`, that maps the trait
+/// We start out with `trait_to_impl_args`, that maps the trait
/// type parameters to impl type parameters. This is taken from the
/// impl trait reference:
///
/// ```rust,ignore (pseudo-Rust)
-/// trait_to_impl_substs = {'t => 'j, T => &'i U, Self => Foo}
+/// trait_to_impl_args = {'t => 'j, T => &'i U, Self => Foo}
/// ```
///
-/// We create a mapping `dummy_substs` that maps from the impl type
+/// We create a mapping `dummy_args` that maps from the impl type
/// parameters to fresh types and regions. For type parameters,
/// this is the identity transform, but we could as well use any
/// placeholder types. For regions, we convert from bound to free
@@ -112,32 +112,32 @@ fn check_method_is_structurally_compatible<'tcx>(
/// declared on the impl or used in type parameter bounds).
///
/// ```rust,ignore (pseudo-Rust)
-/// impl_to_placeholder_substs = {'i => 'i0, U => U0, N => N0 }
+/// impl_to_placeholder_args = {'i => 'i0, U => U0, N => N0 }
/// ```
///
-/// Now we can apply `placeholder_substs` to the type of the impl method
+/// Now we can apply `placeholder_args` to the type of the impl method
/// to yield a new function type in terms of our fresh, placeholder
/// types:
///
/// ```rust,ignore (pseudo-Rust)
-/// <'b> fn(t: &'i0 U0, m: &'b) -> Foo
+/// <'b> fn(t: &'i0 U0, m: &'b N0) -> Foo
/// ```
///
/// We now want to extract and substitute the type of the *trait*
/// method and compare it. To do so, we must create a compound
-/// substitution by combining `trait_to_impl_substs` and
-/// `impl_to_placeholder_substs`, and also adding a mapping for the method
+/// substitution by combining `trait_to_impl_args` and
+/// `impl_to_placeholder_args`, and also adding a mapping for the method
/// type parameters. We extend the mapping to also include
/// the method parameters.
///
/// ```rust,ignore (pseudo-Rust)
-/// trait_to_placeholder_substs = { T => &'i0 U0, Self => Foo, M => N0 }
+/// trait_to_placeholder_args = { T => &'i0 U0, Self => Foo, M => N0 }
/// ```
///
/// Applying this to the trait method type yields:
///
/// ```rust,ignore (pseudo-Rust)
-/// <'a> fn(t: &'i0 U0, m: &'a) -> Foo
+/// <'a> fn(t: &'i0 U0, m: &'a N0) -> Foo
/// ```
///
/// This type is also the same but the name of the bound region (`'a`
@@ -148,8 +148,8 @@ fn check_method_is_structurally_compatible<'tcx>(
/// satisfied by the implementation's method.
///
/// We do this by creating a parameter environment which contains a
-/// substitution corresponding to `impl_to_placeholder_substs`. We then build
-/// `trait_to_placeholder_substs` and use it to convert the predicates contained
+/// substitution corresponding to `impl_to_placeholder_args`. We then build
+/// `trait_to_placeholder_args` and use it to convert the predicates contained
/// in the `trait_m` generics to the placeholder form.
///
/// Finally we register each of these predicates as an obligation and check that
@@ -162,7 +162,7 @@ fn compare_method_predicate_entailment<'tcx>(
impl_trait_ref: ty::TraitRef<'tcx>,
check_implied_wf: CheckImpliedWfMode,
) -> Result<(), ErrorGuaranteed> {
- let trait_to_impl_substs = impl_trait_ref.substs;
+ let trait_to_impl_args = impl_trait_ref.args;
// This node-id should be used for the `body_id` field on each
// `ObligationCause` (and the `FnCtxt`).
@@ -182,12 +182,12 @@ fn compare_method_predicate_entailment<'tcx>(
);
// Create mapping from impl to placeholder.
- let impl_to_placeholder_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id);
+ let impl_to_placeholder_args = GenericArgs::identity_for_item(tcx, impl_m.def_id);
// Create mapping from trait to placeholder.
- let trait_to_placeholder_substs =
- impl_to_placeholder_substs.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_substs);
- debug!("compare_impl_method: trait_to_placeholder_substs={:?}", trait_to_placeholder_substs);
+ let trait_to_placeholder_args =
+ impl_to_placeholder_args.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_args);
+ debug!("compare_impl_method: trait_to_placeholder_args={:?}", trait_to_placeholder_args);
let impl_m_predicates = tcx.predicates_of(impl_m.def_id);
let trait_m_predicates = tcx.predicates_of(trait_m.def_id);
@@ -211,7 +211,7 @@ fn compare_method_predicate_entailment<'tcx>(
// if all constraints hold.
hybrid_preds.predicates.extend(
trait_m_predicates
- .instantiate_own(tcx, trait_to_placeholder_substs)
+ .instantiate_own(tcx, trait_to_placeholder_args)
.map(|(predicate, _)| predicate),
);
@@ -219,11 +219,7 @@ fn compare_method_predicate_entailment<'tcx>(
// The key step here is to update the caller_bounds's predicates to be
// the new hybrid bounds we computed.
let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_def_id);
- let param_env = ty::ParamEnv::new(
- tcx.mk_clauses(&hybrid_preds.predicates),
- Reveal::UserFacing,
- hir::Constness::NotConst,
- );
+ let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds.predicates), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
let infcx = &tcx.infer_ctxt().build();
@@ -231,7 +227,7 @@ fn compare_method_predicate_entailment<'tcx>(
debug!("compare_impl_method: caller_bounds={:?}", param_env.caller_bounds());
- let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_placeholder_substs);
+ let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_placeholder_args);
for (predicate, span) in impl_m_own_bounds {
let normalize_cause = traits::ObligationCause::misc(span, impl_m_def_id);
let predicate = ocx.normalize(&normalize_cause, param_env, predicate);
@@ -262,22 +258,19 @@ fn compare_method_predicate_entailment<'tcx>(
// type.
// Compute placeholder form of impl and trait method tys.
- let tcx = infcx.tcx;
-
let mut wf_tys = FxIndexSet::default();
let unnormalized_impl_sig = infcx.instantiate_binder_with_fresh_vars(
impl_m_span,
infer::HigherRankedType,
- tcx.fn_sig(impl_m.def_id).subst_identity(),
+ tcx.fn_sig(impl_m.def_id).instantiate_identity(),
);
- let unnormalized_impl_fty = Ty::new_fn_ptr(tcx, ty::Binder::dummy(unnormalized_impl_sig));
let norm_cause = ObligationCause::misc(impl_m_span, impl_m_def_id);
let impl_sig = ocx.normalize(&norm_cause, param_env, unnormalized_impl_sig);
debug!("compare_impl_method: impl_fty={:?}", impl_sig);
- let trait_sig = tcx.fn_sig(trait_m.def_id).subst(tcx, trait_to_placeholder_substs);
+ let trait_sig = tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_placeholder_args);
let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, trait_sig);
// Next, add all inputs and output as well-formed tys. Importantly,
@@ -315,16 +308,60 @@ fn compare_method_predicate_entailment<'tcx>(
}
if check_implied_wf == CheckImpliedWfMode::Check && !(impl_sig, trait_sig).references_error() {
- // We need to check that the impl's args are well-formed given
- // the hybrid param-env (impl + trait method where-clauses).
- ocx.register_obligation(traits::Obligation::new(
- infcx.tcx,
- ObligationCause::dummy(),
- param_env,
- ty::Binder::dummy(ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(
- unnormalized_impl_fty.into(),
- ))),
- ));
+ // Select obligations to make progress on inference before processing
+ // the wf obligation below.
+ // FIXME(-Ztrait-solver=next): Not needed when the hack below is removed.
+ let errors = ocx.select_where_possible();
+ if !errors.is_empty() {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors);
+ return Err(reported);
+ }
+
+ // See #108544. Annoyingly, we can end up in cases where, because of winnowing,
+ // we pick param env candidates over a more general impl, leading to
+ // stricter lifetime requirements than we would otherwise need. This can
+ // trigger the lint. Instead, let's only consider type outlives and
+ // region outlives obligations.
+ //
+ // FIXME(-Ztrait-solver=next): Try removing this hack again once
+ // the new solver is stable.
+ let mut wf_args: smallvec::SmallVec<[_; 4]> =
+ unnormalized_impl_sig.inputs_and_output.iter().map(|ty| ty.into()).collect();
+ // Annoyingly, asking for the WF predicates of an array (with an unevaluated const (only?))
+ // will give back the well-formed predicate of the same array.
+ let mut wf_args_seen: FxHashSet<_> = wf_args.iter().copied().collect();
+ while let Some(arg) = wf_args.pop() {
+ let Some(obligations) = rustc_trait_selection::traits::wf::obligations(
+ infcx,
+ param_env,
+ impl_m_def_id,
+ 0,
+ arg,
+ impl_m_span,
+ ) else {
+ continue;
+ };
+ for obligation in obligations {
+ debug!(?obligation);
+ match obligation.predicate.kind().skip_binder() {
+ // We need to register Projection obligations too, because we may end up with
+ // an implied `X::Item: 'a`, which gets desugared into `X::Item = ?0`, `?0: 'a`.
+ // If we only register the region outlives obligation, this leads to an unconstrained var.
+ // See `implied_bounds_entailment_alias_var` test.
+ ty::PredicateKind::Clause(
+ ty::ClauseKind::RegionOutlives(..)
+ | ty::ClauseKind::TypeOutlives(..)
+ | ty::ClauseKind::Projection(..),
+ ) => ocx.register_obligation(obligation),
+ ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(arg)) => {
+ if wf_args_seen.insert(arg) {
+ wf_args.push(arg)
+ }
+ }
+ _ => {}
+ }
+ }
+ }
}
// Check that all obligations are satisfied by the implementation's
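To make the `check_implied_wf` path above concrete, this is the kind of code it exists for, written as ordinary Rust (an illustrative aside, not part of the patch; `Get` and `S` are made-up names):

```rust
trait Get {
    fn get<'a, 'b>(x: &'a &'b u8) -> &'a u8;
}

struct S;

impl Get for S {
    // Returning `*x` needs `'b: 'a`. That bound is written nowhere; it is
    // implied by `&'a &'b u8` being well-formed, which is exactly what the
    // outlives/WF obligations collected from the unnormalized signature encode.
    fn get<'a, 'b>(x: &'a &'b u8) -> &'a u8 {
        *x
    }
}

fn main() {
    let byte = 1u8;
    let r = &byte;
    assert_eq!(*S::get(&r), 1);
}
```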
@@ -357,7 +394,7 @@ fn compare_method_predicate_entailment<'tcx>(
// lifetime parameters.
let outlives_env = OutlivesEnvironment::with_bounds(
param_env,
- infcx.implied_bounds_tys(param_env, impl_m_def_id, wf_tys.clone()),
+ infcx.implied_bounds_tys(param_env, impl_m_def_id, wf_tys),
);
let errors = infcx.resolve_regions(&outlives_env);
if !errors.is_empty() {
@@ -615,14 +652,14 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
let impl_m = tcx.opt_associated_item(impl_m_def_id.to_def_id()).unwrap();
let trait_m = tcx.opt_associated_item(impl_m.trait_item_def_id.unwrap()).unwrap();
let impl_trait_ref =
- tcx.impl_trait_ref(impl_m.impl_container(tcx).unwrap()).unwrap().subst_identity();
+ tcx.impl_trait_ref(impl_m.impl_container(tcx).unwrap()).unwrap().instantiate_identity();
let param_env = tcx.param_env(impl_m_def_id);
// First, check a few of the same things as `compare_impl_method`,
// just so we don't ICE during substitution later.
check_method_is_structurally_compatible(tcx, impl_m, trait_m, impl_trait_ref, true)?;
- let trait_to_impl_substs = impl_trait_ref.substs;
+ let trait_to_impl_args = impl_trait_ref.args;
let impl_m_hir_id = tcx.hir().local_def_id_to_hir_id(impl_m_def_id);
let return_span = tcx.hir().fn_decl_by_hir_id(impl_m_hir_id).unwrap().output.span();
@@ -637,11 +674,11 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
);
// Create mapping from impl to placeholder.
- let impl_to_placeholder_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id);
+ let impl_to_placeholder_args = GenericArgs::identity_for_item(tcx, impl_m.def_id);
// Create mapping from trait to placeholder.
- let trait_to_placeholder_substs =
- impl_to_placeholder_substs.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_substs);
+ let trait_to_placeholder_args =
+ impl_to_placeholder_args.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_args);
let infcx = &tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new(infcx);
@@ -651,7 +688,10 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
let impl_sig = ocx.normalize(
&norm_cause,
param_env,
- tcx.liberate_late_bound_regions(impl_m.def_id, tcx.fn_sig(impl_m.def_id).subst_identity()),
+ tcx.liberate_late_bound_regions(
+ impl_m.def_id,
+ tcx.fn_sig(impl_m.def_id).instantiate_identity(),
+ ),
);
impl_sig.error_reported()?;
let impl_return_ty = impl_sig.output();
@@ -665,7 +705,7 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
.instantiate_binder_with_fresh_vars(
return_span,
infer::HigherRankedType,
- tcx.fn_sig(trait_m.def_id).subst(tcx, trait_to_placeholder_substs),
+ tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_placeholder_args),
)
.fold_with(&mut collector);
@@ -757,48 +797,48 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
);
ocx.resolve_regions_and_report_errors(impl_m_def_id, &outlives_env)?;
- let mut collected_tys = FxHashMap::default();
- for (def_id, (ty, substs)) in collected_types {
- match infcx.fully_resolve((ty, substs)) {
- Ok((ty, substs)) => {
+ let mut remapped_types = FxHashMap::default();
+ for (def_id, (ty, args)) in collected_types {
+ match infcx.fully_resolve((ty, args)) {
+ Ok((ty, args)) => {
// `ty` contains free regions that we created earlier while liberating the
// trait fn signature. However, projection normalization expects `ty` to
// contain `def_id`'s early-bound regions.
- let id_substs = InternalSubsts::identity_for_item(tcx, def_id);
- debug!(?id_substs, ?substs);
- let map: FxHashMap<_, _> = std::iter::zip(substs, id_substs)
+ let id_args = GenericArgs::identity_for_item(tcx, def_id);
+ debug!(?id_args, ?args);
+ let map: FxHashMap<_, _> = std::iter::zip(args, id_args)
.skip(tcx.generics_of(trait_m.def_id).count())
.filter_map(|(a, b)| Some((a.as_region()?, b.as_region()?)))
.collect();
debug!(?map);
// NOTE(compiler-errors): RPITITs, like all other RPITs, have early-bound
- // region substs that are synthesized during AST lowering. These are substs
- // that are appended to the parent substs (trait and trait method). However,
+ // region args that are synthesized during AST lowering. These are args
+ // that are appended to the parent args (trait and trait method). However,
// we're trying to infer the unsubstituted type value of the RPITIT inside
- // the *impl*, so we can later use the impl's method substs to normalize
+ // the *impl*, so we can later use the impl's method args to normalize
// an RPITIT to a concrete type (`confirm_impl_trait_in_trait_candidate`).
//
// Due to the design of RPITITs, during AST lowering, we have no idea that
// an impl method corresponds to a trait method with RPITITs in it. Therefore,
- // we don't have a list of early-bound region substs for the RPITIT in the impl.
+ // we don't have a list of early-bound region args for the RPITIT in the impl.
// Since early region parameters are index-based, we can't just rebase these
- // (trait method) early-bound region substs onto the impl, and there's no
- // guarantee that the indices from the trait substs and impl substs line up.
- // So to fix this, we subtract the number of trait substs and add the number of
- // impl substs to *renumber* these early-bound regions to their corresponding
+ // (trait method) early-bound region args onto the impl, and there's no
+ // guarantee that the indices from the trait args and impl args line up.
+ // So to fix this, we subtract the number of trait args and add the number of
+ // impl args to *renumber* these early-bound regions to their corresponding
// indices in the impl's substitutions list.
//
- // Also, we only need to account for a difference in trait and impl substs,
+ // Also, we only need to account for a difference in trait and impl args,
// since we previously enforce that the trait method and impl method have the
// same generics.
- let num_trait_substs = trait_to_impl_substs.len();
- let num_impl_substs = tcx.generics_of(impl_m.container_id(tcx)).params.len();
+ let num_trait_args = trait_to_impl_args.len();
+ let num_impl_args = tcx.generics_of(impl_m.container_id(tcx)).params.len();
let ty = match ty.try_fold_with(&mut RemapHiddenTyRegions {
tcx,
map,
- num_trait_substs,
- num_impl_substs,
+ num_trait_args,
+ num_impl_args,
def_id,
impl_def_id: impl_m.container_id(tcx),
ty,
@@ -807,24 +847,42 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
Ok(ty) => ty,
Err(guar) => Ty::new_error(tcx, guar),
};
- collected_tys.insert(def_id, ty::EarlyBinder::bind(ty));
+ remapped_types.insert(def_id, ty::EarlyBinder::bind(ty));
}
Err(err) => {
let reported = tcx.sess.delay_span_bug(
return_span,
format!("could not fully resolve: {ty} => {err:?}"),
);
- collected_tys.insert(def_id, ty::EarlyBinder::bind(Ty::new_error(tcx, reported)));
+ remapped_types.insert(def_id, ty::EarlyBinder::bind(Ty::new_error(tcx, reported)));
}
}
}
- Ok(&*tcx.arena.alloc(collected_tys))
+ // We may not collect all RPITITs that we see in the HIR for a trait signature
+ // because an RPITIT was located within a missing item. Like if we have a sig
+ // returning `-> Missing<impl Sized>`, that gets converted to `-> [type error]`,
+ // and when walking through the signature we end up never collecting the def id
+ // of the `impl Sized`. Insert that here, so we don't ICE later.
+ for assoc_item in tcx.associated_types_for_impl_traits_in_associated_fn(trait_m.def_id) {
+ if !remapped_types.contains_key(assoc_item) {
+ remapped_types.insert(
+ *assoc_item,
+ ty::EarlyBinder::bind(Ty::new_error_with_message(
+ tcx,
+ return_span,
+ "missing synthetic item for RPITIT",
+ )),
+ );
+ }
+ }
+
+ Ok(&*tcx.arena.alloc(remapped_types))
}
struct ImplTraitInTraitCollector<'a, 'tcx> {
ocx: &'a ObligationCtxt<'a, 'tcx>,
- types: FxHashMap<DefId, (Ty<'tcx>, ty::SubstsRef<'tcx>)>,
+ types: FxHashMap<DefId, (Ty<'tcx>, ty::GenericArgsRef<'tcx>)>,
span: Span,
param_env: ty::ParamEnv<'tcx>,
body_id: LocalDefId,
@@ -853,8 +911,8 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ImplTraitInTraitCollector<'_, 'tcx> {
if let Some((ty, _)) = self.types.get(&proj.def_id) {
return *ty;
}
- //FIXME(RPITIT): Deny nested RPITIT in substs too
- if proj.substs.has_escaping_bound_vars() {
+ //FIXME(RPITIT): Deny nested RPITIT in args too
+ if proj.args.has_escaping_bound_vars() {
bug!("FIXME(RPITIT): error here");
}
// Replace with infer var
@@ -862,9 +920,9 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ImplTraitInTraitCollector<'_, 'tcx> {
span: self.span,
kind: TypeVariableOriginKind::MiscVariable,
});
- self.types.insert(proj.def_id, (infer_ty, proj.substs));
+ self.types.insert(proj.def_id, (infer_ty, proj.args));
// Recurse into bounds
- for (pred, pred_span) in self.interner().explicit_item_bounds(proj.def_id).subst_iter_copied(self.interner(), proj.substs) {
+ for (pred, pred_span) in self.interner().explicit_item_bounds(proj.def_id).iter_instantiated_copied(self.interner(), proj.args) {
let pred = pred.fold_with(self);
let pred = self.ocx.normalize(
&ObligationCause::misc(self.span, self.body_id),
@@ -893,8 +951,8 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ImplTraitInTraitCollector<'_, 'tcx> {
struct RemapHiddenTyRegions<'tcx> {
tcx: TyCtxt<'tcx>,
map: FxHashMap<ty::Region<'tcx>, ty::Region<'tcx>>,
- num_trait_substs: usize,
- num_impl_substs: usize,
+ num_trait_args: usize,
+ num_impl_args: usize,
def_id: DefId,
impl_def_id: DefId,
ty: Ty<'tcx>,
@@ -909,16 +967,16 @@ impl<'tcx> ty::FallibleTypeFolder<TyCtxt<'tcx>> for RemapHiddenTyRegions<'tcx> {
}
fn try_fold_ty(&mut self, t: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
- if let ty::Alias(ty::Opaque, ty::AliasTy { substs, def_id, .. }) = *t.kind() {
- let mut mapped_substs = Vec::with_capacity(substs.len());
- for (arg, v) in std::iter::zip(substs, self.tcx.variances_of(def_id)) {
- mapped_substs.push(match (arg.unpack(), v) {
- // Skip uncaptured opaque substs
+ if let ty::Alias(ty::Opaque, ty::AliasTy { args, def_id, .. }) = *t.kind() {
+ let mut mapped_args = Vec::with_capacity(args.len());
+ for (arg, v) in std::iter::zip(args, self.tcx.variances_of(def_id)) {
+ mapped_args.push(match (arg.unpack(), v) {
+ // Skip uncaptured opaque args
(ty::GenericArgKind::Lifetime(_), ty::Bivariant) => arg,
_ => arg.try_fold_with(self)?,
});
}
- Ok(Ty::new_opaque(self.tcx, def_id, self.tcx.mk_substs(&mapped_substs)))
+ Ok(Ty::new_opaque(self.tcx, def_id, self.tcx.mk_args(&mapped_args)))
} else {
t.try_super_fold_with(self)
}
@@ -975,7 +1033,7 @@ impl<'tcx> ty::FallibleTypeFolder<TyCtxt<'tcx>> for RemapHiddenTyRegions<'tcx> {
ty::EarlyBoundRegion {
def_id: e.def_id,
name: e.name,
- index: (e.index as usize - self.num_trait_substs + self.num_impl_substs) as u32,
+ index: (e.index as usize - self.num_trait_args + self.num_impl_args) as u32,
},
))
}
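A worked instance of the index arithmetic in `try_fold_region` above (numbers are illustrative; `remap_index` is just the formula extracted for demonstration):

```rust
// Early-bound region indices are position-based, so a region sitting at
// index `i` among the trait-side args moves to
// `i - num_trait_args + num_impl_args` once it is re-expressed relative
// to the impl's substitution list.
fn remap_index(index: u32, num_trait_args: usize, num_impl_args: usize) -> u32 {
    (index as usize - num_trait_args + num_impl_args) as u32
}

fn main() {
    // Trait + trait method contribute 3 args, the impl declares 2 params:
    // the region at trait-side index 4 ends up at impl-side index 3.
    assert_eq!(remap_index(4, 3, 2), 3);
}
```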
@@ -1214,7 +1272,7 @@ fn compare_self_type<'tcx>(
ty::ImplContainer => impl_trait_ref.self_ty(),
ty::TraitContainer => tcx.types.self_param,
};
- let self_arg_ty = tcx.fn_sig(method.def_id).subst_identity().input(0);
+ let self_arg_ty = tcx.fn_sig(method.def_id).instantiate_identity().input(0);
let param_env = ty::ParamEnv::reveal_all();
let infcx = tcx.infer_ctxt().build();
@@ -1319,7 +1377,7 @@ fn compare_number_of_generics<'tcx>(
// has mismatched type or const generic arguments, then the method that it's
// inheriting the generics from will also have mismatched arguments, and
// we'll report an error for that instead. Delay a bug for safety, though.
- if trait_.opt_rpitit_info.is_some() {
+ if trait_.is_impl_trait_in_trait() {
return Err(tcx.sess.delay_span_bug(
rustc_span::DUMMY_SP,
"errors comparing numbers of generics of trait/impl functions were not emitted",
@@ -1669,19 +1727,19 @@ fn compare_synthetic_generics<'tcx>(
/// ```rust,ignore (pseudo-Rust)
/// trait Foo {
/// fn foo<const N: u8>();
-/// type bar<const N: u8>;
+/// type Bar<const N: u8>;
/// fn baz<const N: u32>();
-/// type blah<T>;
+/// type Blah<T>;
/// }
///
/// impl Foo for () {
/// fn foo<const N: u64>() {}
/// //~^ error
-/// type bar<const N: u64> {}
+/// type Bar<const N: u64> = ();
/// //~^ error
/// fn baz<T>() {}
/// //~^ error
-/// type blah<const N: i64> = u32;
+/// type Blah<const N: i64> = u32;
/// //~^ error
/// }
/// ```
@@ -1738,10 +1796,10 @@ fn compare_generic_param_kinds<'tcx>(
format!(
"{} const parameter of type `{}`",
prefix,
- tcx.type_of(param.def_id).subst_identity()
+ tcx.type_of(param.def_id).instantiate_identity()
)
}
- Type { .. } => format!("{} type parameter", prefix),
+ Type { .. } => format!("{prefix} type parameter"),
Lifetime { .. } => unreachable!(),
};
@@ -1769,37 +1827,83 @@ pub(super) fn compare_impl_const_raw(
let impl_const_item = tcx.associated_item(impl_const_item_def);
let trait_const_item = tcx.associated_item(trait_const_item_def);
let impl_trait_ref =
- tcx.impl_trait_ref(impl_const_item.container_id(tcx)).unwrap().subst_identity();
- debug!("compare_const_impl(impl_trait_ref={:?})", impl_trait_ref);
+ tcx.impl_trait_ref(impl_const_item.container_id(tcx)).unwrap().instantiate_identity();
- let impl_c_span = tcx.def_span(impl_const_item_def.to_def_id());
+ debug!("compare_impl_const(impl_trait_ref={:?})", impl_trait_ref);
- let infcx = tcx.infer_ctxt().build();
- let param_env = tcx.param_env(impl_const_item_def.to_def_id());
- let ocx = ObligationCtxt::new(&infcx);
+ compare_number_of_generics(tcx, impl_const_item, trait_const_item, false)?;
+ compare_generic_param_kinds(tcx, impl_const_item, trait_const_item, false)?;
+ compare_const_predicate_entailment(tcx, impl_const_item, trait_const_item, impl_trait_ref)
+}
+
+/// The equivalent of [compare_method_predicate_entailment], but for associated constants
+/// instead of associated functions.
+// FIXME(generic_const_items): If possible extract the common parts of `compare_{type,const}_predicate_entailment`.
+fn compare_const_predicate_entailment<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_ct: ty::AssocItem,
+ trait_ct: ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let impl_ct_def_id = impl_ct.def_id.expect_local();
+ let impl_ct_span = tcx.def_span(impl_ct_def_id);
// The below is for the most part highly similar to the procedure
// for methods above. It is simpler in many respects, especially
// because we shouldn't really have to deal with lifetimes or
// predicates. In fact some of this should probably be put into
// shared functions because of DRY violations...
- let trait_to_impl_substs = impl_trait_ref.substs;
+ let impl_args = GenericArgs::identity_for_item(tcx, impl_ct.def_id);
+ let trait_to_impl_args =
+ impl_args.rebase_onto(tcx, impl_ct.container_id(tcx), impl_trait_ref.args);
// Create a parameter environment that represents the implementation's
// method.
// Compute placeholder form of impl and trait const tys.
- let impl_ty = tcx.type_of(impl_const_item_def.to_def_id()).subst_identity();
- let trait_ty = tcx.type_of(trait_const_item_def).subst(tcx, trait_to_impl_substs);
- let mut cause = ObligationCause::new(
- impl_c_span,
- impl_const_item_def,
- ObligationCauseCode::CompareImplItemObligation {
- impl_item_def_id: impl_const_item_def,
- trait_item_def_id: trait_const_item_def,
- kind: impl_const_item.kind,
- },
+ let impl_ty = tcx.type_of(impl_ct_def_id).instantiate_identity();
+
+ let trait_ty = tcx.type_of(trait_ct.def_id).instantiate(tcx, trait_to_impl_args);
+ let code = ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_ct_def_id,
+ trait_item_def_id: trait_ct.def_id,
+ kind: impl_ct.kind,
+ };
+ let mut cause = ObligationCause::new(impl_ct_span, impl_ct_def_id, code.clone());
+
+ let impl_ct_predicates = tcx.predicates_of(impl_ct.def_id);
+ let trait_ct_predicates = tcx.predicates_of(trait_ct.def_id);
+
+ check_region_bounds_on_impl_item(tcx, impl_ct, trait_ct, false)?;
+
+ // The predicates declared by the impl definition, the trait and the
+ // associated const in the trait are assumed.
+ let impl_predicates = tcx.predicates_of(impl_ct_predicates.parent.unwrap());
+ let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
+ hybrid_preds.predicates.extend(
+ trait_ct_predicates
+ .instantiate_own(tcx, trait_to_impl_args)
+ .map(|(predicate, _)| predicate),
+ );
+
+ let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds.predicates), Reveal::UserFacing);
+ let param_env = traits::normalize_param_env_or_error(
+ tcx,
+ param_env,
+ ObligationCause::misc(impl_ct_span, impl_ct_def_id),
);
+ let infcx = tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(&infcx);
+
+ let impl_ct_own_bounds = impl_ct_predicates.instantiate_own(tcx, impl_args);
+ for (predicate, span) in impl_ct_own_bounds {
+ let cause = ObligationCause::misc(span, impl_ct_def_id);
+ let predicate = ocx.normalize(&cause, param_env, predicate);
+
+ let cause = ObligationCause::new(span, impl_ct_def_id, code.clone());
+ ocx.register_obligation(traits::Obligation::new(tcx, cause, param_env, predicate));
+ }
+
// There is no "body" here, so just pass dummy id.
let impl_ty = ocx.normalize(&cause, param_env, impl_ty);
@@ -1818,7 +1922,7 @@ pub(super) fn compare_impl_const_raw(
);
// Locate the Span containing just the type of the offending impl
- let (ty, _) = tcx.hir().expect_impl_item(impl_const_item_def).expect_const();
+ let (ty, _) = tcx.hir().expect_impl_item(impl_ct_def_id).expect_const();
cause.span = ty.span;
let mut diag = struct_span_err!(
@@ -1826,12 +1930,12 @@ pub(super) fn compare_impl_const_raw(
cause.span,
E0326,
"implemented const `{}` has an incompatible type for trait",
- trait_const_item.name
+ trait_ct.name
);
- let trait_c_span = trait_const_item_def.as_local().map(|trait_c_def_id| {
+ let trait_c_span = trait_ct.def_id.as_local().map(|trait_ct_def_id| {
// Add a label to the Span containing just the type of the const
- let (ty, _) = tcx.hir().expect_trait_item(trait_c_def_id).expect_const();
+ let (ty, _) = tcx.hir().expect_trait_item(trait_ct_def_id).expect_const();
ty.span
});
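For reference, the `E0326` diagnostic built above fires on code like the following (illustrative only, and it intentionally does not compile):

```rust
trait Tr {
    const C: usize;
}

struct S;

impl Tr for S {
    // The trait declares `C: usize`, so giving it type `i32` here is the
    // "implemented const `C` has an incompatible type for trait" mismatch,
    // with labels on `i32` here and on `usize` in the trait.
    const C: i32 = 0;
}
```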
@@ -1858,7 +1962,7 @@ pub(super) fn compare_impl_const_raw(
}
let outlives_env = OutlivesEnvironment::new(param_env);
- ocx.resolve_regions_and_report_errors(impl_const_item_def, &outlives_env)
+ ocx.resolve_regions_and_report_errors(impl_ct_def_id, &outlives_env)
}
pub(super) fn compare_impl_ty<'tcx>(
@@ -1885,26 +1989,26 @@ fn compare_type_predicate_entailment<'tcx>(
trait_ty: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) -> Result<(), ErrorGuaranteed> {
- let impl_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
- let trait_to_impl_substs =
- impl_substs.rebase_onto(tcx, impl_ty.container_id(tcx), impl_trait_ref.substs);
+ let impl_args = GenericArgs::identity_for_item(tcx, impl_ty.def_id);
+ let trait_to_impl_args =
+ impl_args.rebase_onto(tcx, impl_ty.container_id(tcx), impl_trait_ref.args);
let impl_ty_predicates = tcx.predicates_of(impl_ty.def_id);
let trait_ty_predicates = tcx.predicates_of(trait_ty.def_id);
check_region_bounds_on_impl_item(tcx, impl_ty, trait_ty, false)?;
- let impl_ty_own_bounds = impl_ty_predicates.instantiate_own(tcx, impl_substs);
+ let impl_ty_own_bounds = impl_ty_predicates.instantiate_own(tcx, impl_args);
if impl_ty_own_bounds.len() == 0 {
// Nothing to check.
return Ok(());
}
- // This `HirId` should be used for the `body_id` field on each
+ // This `DefId` should be used for the `body_id` field on each
// `ObligationCause` (and the `FnCtxt`). This is what
// `regionck_item` expects.
let impl_ty_def_id = impl_ty.def_id.expect_local();
- debug!("compare_type_predicate_entailment: trait_to_impl_substs={:?}", trait_to_impl_substs);
+ debug!("compare_type_predicate_entailment: trait_to_impl_args={:?}", trait_to_impl_args);
// The predicates declared by the impl definition, the trait and the
// associated type in the trait are assumed.
@@ -1912,19 +2016,15 @@ fn compare_type_predicate_entailment<'tcx>(
let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
hybrid_preds.predicates.extend(
trait_ty_predicates
- .instantiate_own(tcx, trait_to_impl_substs)
+ .instantiate_own(tcx, trait_to_impl_args)
.map(|(predicate, _)| predicate),
);
debug!("compare_type_predicate_entailment: bounds={:?}", hybrid_preds);
let impl_ty_span = tcx.def_span(impl_ty_def_id);
- let normalize_cause = traits::ObligationCause::misc(impl_ty_span, impl_ty_def_id);
- let param_env = ty::ParamEnv::new(
- tcx.mk_clauses(&hybrid_preds.predicates),
- Reveal::UserFacing,
- hir::Constness::NotConst,
- );
+ let normalize_cause = ObligationCause::misc(impl_ty_span, impl_ty_def_id);
+ let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds.predicates), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
let infcx = tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new(&infcx);
@@ -1968,7 +2068,7 @@ fn compare_type_predicate_entailment<'tcx>(
///
/// trait X { type Y: Copy } impl X for T { type Y = S; }
///
-/// We are able to normalize `<T as X>::U` to `S`, and so when we check the
+/// We are able to normalize `<T as X>::Y` to `S`, and so when we check the
/// impl is well-formed we have to prove `S: Copy`.
///
/// For default associated types the normalization is not possible (the value
@@ -1990,9 +2090,9 @@ pub(super) fn check_type_bounds<'tcx>(
// }
//
// - `impl_trait_ref` would be `<(A, B) as Foo<u32>>`
- // - `normalize_impl_ty_substs` would be `[A, B, ^0.0]` (`^0.0` here is the bound var with db 0 and index 0)
+ // - `normalize_impl_ty_args` would be `[A, B, ^0.0]` (`^0.0` here is the bound var with db 0 and index 0)
// - `normalize_impl_ty` would be `Wrapper<A, B, ^0.0>`
- // - `rebased_substs` would be `[(A, B), u32, ^0.0]`, combining the substs from
+ // - `rebased_args` would be `[(A, B), u32, ^0.0]`, combining the args from
// the *trait* with the generic associated type parameters (as bound vars).
//
// A note regarding the use of bound vars here:
@@ -2022,9 +2122,11 @@ pub(super) fn check_type_bounds<'tcx>(
// the trait (notably, that X: Eq and T: Family).
let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> =
smallvec::SmallVec::with_capacity(tcx.generics_of(impl_ty.def_id).params.len());
- // Extend the impl's identity substs with late-bound GAT vars
- let normalize_impl_ty_substs = ty::InternalSubsts::identity_for_item(tcx, container_id)
- .extend_to(tcx, impl_ty.def_id, |param, _| match param.kind {
+ // Extend the impl's identity args with late-bound GAT vars
+ let normalize_impl_ty_args = ty::GenericArgs::identity_for_item(tcx, container_id).extend_to(
+ tcx,
+ impl_ty.def_id,
+ |param, _| match param.kind {
GenericParamDefKind::Type { .. } => {
let kind = ty::BoundTyKind::Param(param.def_id, param.name);
let bound_var = ty::BoundVariableKind::Ty(kind);
@@ -2060,7 +2162,8 @@ pub(super) fn check_type_bounds<'tcx>(
)
.into()
}
- });
+ },
+ );
// When checking something like
//
// trait X { type Y: PartialEq<<Self as X>::Y> }
@@ -2070,15 +2173,14 @@ pub(super) fn check_type_bounds<'tcx>(
// we want <T as X>::Y to normalize to S. This is valid because we are
// checking the default value specifically here. Add this equality to the
// ParamEnv for normalization specifically.
- let normalize_impl_ty = tcx.type_of(impl_ty.def_id).subst(tcx, normalize_impl_ty_substs);
- let rebased_substs =
- normalize_impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
+ let normalize_impl_ty = tcx.type_of(impl_ty.def_id).instantiate(tcx, normalize_impl_ty_args);
+ let rebased_args = normalize_impl_ty_args.rebase_onto(tcx, container_id, impl_trait_ref.args);
let bound_vars = tcx.mk_bound_variable_kinds(&bound_vars);
let normalize_param_env = {
let mut predicates = param_env.caller_bounds().iter().collect::<Vec<_>>();
match normalize_impl_ty.kind() {
ty::Alias(ty::Projection, proj)
- if proj.def_id == trait_ty.def_id && proj.substs == rebased_substs =>
+ if proj.def_id == trait_ty.def_id && proj.args == rebased_args =>
{
// Don't include this predicate if the projected type is
// exactly the same as the projection. This can occur in
@@ -2089,7 +2191,7 @@ pub(super) fn check_type_bounds<'tcx>(
_ => predicates.push(
ty::Binder::bind_with_vars(
ty::ProjectionPredicate {
- projection_ty: tcx.mk_alias_ty(trait_ty.def_id, rebased_substs),
+ projection_ty: tcx.mk_alias_ty(trait_ty.def_id, rebased_args),
term: normalize_impl_ty.into(),
},
bound_vars,
@@ -2097,13 +2199,13 @@ pub(super) fn check_type_bounds<'tcx>(
.to_predicate(tcx),
),
};
- ty::ParamEnv::new(tcx.mk_clauses(&predicates), Reveal::UserFacing, param_env.constness())
+ ty::ParamEnv::new(tcx.mk_clauses(&predicates), Reveal::UserFacing)
};
debug!(?normalize_param_env);
let impl_ty_def_id = impl_ty.def_id.expect_local();
- let impl_ty_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
- let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
+ let impl_ty_args = GenericArgs::identity_for_item(tcx, impl_ty.def_id);
+ let rebased_args = impl_ty_args.rebase_onto(tcx, container_id, impl_trait_ref.args);
let infcx = tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new(&infcx);
@@ -2111,7 +2213,7 @@ pub(super) fn check_type_bounds<'tcx>(
// A synthetic impl Trait for RPITIT desugaring has no HIR, which we currently use to get the
// span for an impl's associated type. Instead, for these, use the def_span for the synthesized
// associated type.
- let impl_ty_span = if impl_ty.opt_rpitit_info.is_some() {
+ let impl_ty_span = if impl_ty.is_impl_trait_in_trait() {
tcx.def_span(impl_ty_def_id)
} else {
match tcx.hir().get_by_def_id(impl_ty_def_id) {
@@ -2144,7 +2246,7 @@ pub(super) fn check_type_bounds<'tcx>(
let obligations: Vec<_> = tcx
.explicit_item_bounds(trait_ty.def_id)
- .subst_iter_copied(tcx, rebased_substs)
+ .iter_instantiated_copied(tcx, rebased_args)
.map(|(concrete_ty_bound, span)| {
debug!("check_type_bounds: concrete_ty_bound = {:?}", concrete_ty_bound);
traits::Obligation::new(tcx, mk_cause(span), param_env, concrete_ty_bound)
diff --git a/compiler/rustc_hir_analysis/src/check/dropck.rs b/compiler/rustc_hir_analysis/src/check/dropck.rs
index 13d1abe2a..dda3f7425 100644
--- a/compiler/rustc_hir_analysis/src/check/dropck.rs
+++ b/compiler/rustc_hir_analysis/src/check/dropck.rs
@@ -5,8 +5,8 @@ use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{struct_span_err, ErrorGuaranteed};
use rustc_infer::infer::outlives::env::OutlivesEnvironment;
use rustc_infer::infer::{RegionResolutionError, TyCtxtInferExt};
-use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::util::CheckRegions;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, TyCtxt};
use rustc_trait_selection::traits::{self, ObligationCtxt};
@@ -44,21 +44,21 @@ pub fn check_drop_impl(tcx: TyCtxt<'_>, drop_impl_did: DefId) -> Result<(), Erro
}));
}
}
- let dtor_self_type = tcx.type_of(drop_impl_did).subst_identity();
+ let dtor_self_type = tcx.type_of(drop_impl_did).instantiate_identity();
match dtor_self_type.kind() {
- ty::Adt(adt_def, adt_to_impl_substs) => {
+ ty::Adt(adt_def, adt_to_impl_args) => {
ensure_drop_params_and_item_params_correspond(
tcx,
drop_impl_did.expect_local(),
adt_def.did(),
- adt_to_impl_substs,
+ adt_to_impl_args,
)?;
ensure_drop_predicates_are_implied_by_item_defn(
tcx,
drop_impl_did.expect_local(),
adt_def.did().expect_local(),
- adt_to_impl_substs,
+ adt_to_impl_args,
)
}
_ => {
@@ -79,10 +79,11 @@ fn ensure_drop_params_and_item_params_correspond<'tcx>(
tcx: TyCtxt<'tcx>,
drop_impl_did: LocalDefId,
self_type_did: DefId,
- adt_to_impl_substs: SubstsRef<'tcx>,
+ adt_to_impl_args: GenericArgsRef<'tcx>,
) -> Result<(), ErrorGuaranteed> {
- let Err(arg) = tcx.uses_unique_generic_params(adt_to_impl_substs, CheckRegions::OnlyEarlyBound) else {
- return Ok(())
+ let Err(arg) = tcx.uses_unique_generic_params(adt_to_impl_args, CheckRegions::OnlyEarlyBound)
+ else {
+ return Ok(());
};
let drop_impl_span = tcx.def_span(drop_impl_did);
@@ -114,12 +115,12 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
tcx: TyCtxt<'tcx>,
drop_impl_def_id: LocalDefId,
adt_def_id: LocalDefId,
- adt_to_impl_substs: SubstsRef<'tcx>,
+ adt_to_impl_args: GenericArgsRef<'tcx>,
) -> Result<(), ErrorGuaranteed> {
let infcx = tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new(&infcx);
- // Take the param-env of the adt and substitute the substs that show up in
+ // Take the param-env of the adt and substitute the args that show up in
// the implementation's self type. This gives us the assumptions that the
// self ty of the implementation is allowed to know just from it being a
// well-formed adt, since that's all we're allowed to assume while proving
@@ -128,9 +129,8 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
// We don't need to normalize this param-env or anything, since we're only
// substituting it with free params, so no additional param-env normalization
// can occur on top of what has been done in the param_env query itself.
- let param_env = ty::EarlyBinder::bind(tcx.param_env(adt_def_id))
- .subst(tcx, adt_to_impl_substs)
- .with_constness(tcx.constness(drop_impl_def_id));
+ let param_env =
+ ty::EarlyBinder::bind(tcx.param_env(adt_def_id)).instantiate(tcx, adt_to_impl_args);
for (pred, span) in tcx.predicates_of(drop_impl_def_id).instantiate_identity(tcx) {
let normalize_cause = traits::ObligationCause::misc(span, adt_def_id);
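In terms of ordinary code, the two dropck requirements enforced by `ensure_drop_params_and_item_params_correspond` and `ensure_drop_predicates_are_implied_by_item_defn` look like this (illustrative sketch; `Wrapper` is a made-up type, and the rejected variants are shown as comments since a type may have only one `Drop` impl):

```rust
struct Wrapper<T>(T);

// Accepted: the impl's generic params map one-to-one onto the ADT's params,
// and it adds no predicates beyond what `struct Wrapper<T>` already implies.
impl<T> Drop for Wrapper<T> {
    fn drop(&mut self) {}
}

// Rejected variants:
// impl Drop for Wrapper<u8> { .. }          // params don't correspond to the ADT's
// impl<T: Copy> Drop for Wrapper<T> { .. }  // `T: Copy` is not implied by the struct

fn main() {
    let _w = Wrapper(42);
}
```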
diff --git a/compiler/rustc_hir_analysis/src/check/entry.rs b/compiler/rustc_hir_analysis/src/check/entry.rs
new file mode 100644
index 000000000..fcaefe026
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/entry.rs
@@ -0,0 +1,277 @@
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_session::config::EntryFnType;
+use rustc_span::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_span::{symbol::sym, Span};
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode};
+
+use std::ops::Not;
+
+use crate::errors;
+use crate::require_same_types;
+
+pub(crate) fn check_for_entry_fn(tcx: TyCtxt<'_>) {
+ match tcx.entry_fn(()) {
+ Some((def_id, EntryFnType::Main { .. })) => check_main_fn_ty(tcx, def_id),
+ Some((def_id, EntryFnType::Start)) => check_start_fn_ty(tcx, def_id),
+ _ => {}
+ }
+}
+
+fn check_main_fn_ty(tcx: TyCtxt<'_>, main_def_id: DefId) {
+ let main_fnsig = tcx.fn_sig(main_def_id).instantiate_identity();
+ let main_span = tcx.def_span(main_def_id);
+
+ fn main_fn_diagnostics_def_id(tcx: TyCtxt<'_>, def_id: DefId, sp: Span) -> LocalDefId {
+ if let Some(local_def_id) = def_id.as_local() {
+ let hir_type = tcx.type_of(local_def_id).instantiate_identity();
+ if !matches!(hir_type.kind(), ty::FnDef(..)) {
+ span_bug!(sp, "main has a non-function type: found `{}`", hir_type);
+ }
+ local_def_id
+ } else {
+ CRATE_DEF_ID
+ }
+ }
+
+ fn main_fn_generics_params_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().find(hir_id) {
+ Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, generics, _), .. })) => {
+ generics.params.is_empty().not().then_some(generics.span)
+ }
+ _ => {
+ span_bug!(tcx.def_span(def_id), "main has a non-function type");
+ }
+ }
+ }
+
+ fn main_fn_where_clauses_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().find(hir_id) {
+ Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, generics, _), .. })) => {
+ Some(generics.where_clause_span)
+ }
+ _ => {
+ span_bug!(tcx.def_span(def_id), "main has a non-function type");
+ }
+ }
+ }
+
+ fn main_fn_asyncness_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ Some(tcx.def_span(def_id))
+ }
+
+ fn main_fn_return_type_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().find(hir_id) {
+ Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(fn_sig, _, _), .. })) => {
+ Some(fn_sig.decl.output.span())
+ }
+ _ => {
+ span_bug!(tcx.def_span(def_id), "main has a non-function type");
+ }
+ }
+ }
+
+ let mut error = false;
+ let main_diagnostics_def_id = main_fn_diagnostics_def_id(tcx, main_def_id, main_span);
+ let main_fn_generics = tcx.generics_of(main_def_id);
+ let main_fn_predicates = tcx.predicates_of(main_def_id);
+ if main_fn_generics.count() != 0 || !main_fnsig.bound_vars().is_empty() {
+ let generics_param_span = main_fn_generics_params_span(tcx, main_def_id);
+ tcx.sess.emit_err(errors::MainFunctionGenericParameters {
+ span: generics_param_span.unwrap_or(main_span),
+ label_span: generics_param_span,
+ });
+ error = true;
+ } else if !main_fn_predicates.predicates.is_empty() {
+ // generics may bring in implicit predicates, so we skip this check if generics are present.
+ let generics_where_clauses_span = main_fn_where_clauses_span(tcx, main_def_id);
+ tcx.sess.emit_err(errors::WhereClauseOnMain {
+ span: generics_where_clauses_span.unwrap_or(main_span),
+ generics_span: generics_where_clauses_span,
+ });
+ error = true;
+ }
+
+ let main_asyncness = tcx.asyncness(main_def_id);
+ if let hir::IsAsync::Async = main_asyncness {
+ let asyncness_span = main_fn_asyncness_span(tcx, main_def_id);
+ tcx.sess.emit_err(errors::MainFunctionAsync { span: main_span, asyncness: asyncness_span });
+ error = true;
+ }
+
+ for attr in tcx.get_attrs(main_def_id, sym::track_caller) {
+ tcx.sess.emit_err(errors::TrackCallerOnMain { span: attr.span, annotated: main_span });
+ error = true;
+ }
+
+ if !tcx.codegen_fn_attrs(main_def_id).target_features.is_empty()
+ // Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988
+ && !tcx.sess.target.is_like_wasm
+ && !tcx.sess.opts.actually_rustdoc
+ {
+ tcx.sess.emit_err(errors::TargetFeatureOnMain { main: main_span });
+ error = true;
+ }
+
+ if error {
+ return;
+ }
+
+ // Main should have no where clauses, so an empty param env is OK here.
+ let param_env = ty::ParamEnv::empty();
+ let expected_return_type;
+ if let Some(term_did) = tcx.lang_items().termination() {
+ let return_ty = main_fnsig.output();
+ let return_ty_span = main_fn_return_type_span(tcx, main_def_id).unwrap_or(main_span);
+ if !return_ty.bound_vars().is_empty() {
+ tcx.sess.emit_err(errors::MainFunctionReturnTypeGeneric { span: return_ty_span });
+ error = true;
+ }
+ let return_ty = return_ty.skip_binder();
+ let infcx = tcx.infer_ctxt().build();
+ let cause = traits::ObligationCause::new(
+ return_ty_span,
+ main_diagnostics_def_id,
+ ObligationCauseCode::MainFunctionType,
+ );
+ let ocx = traits::ObligationCtxt::new(&infcx);
+ let norm_return_ty = ocx.normalize(&cause, param_env, return_ty);
+ ocx.register_bound(cause, param_env, norm_return_ty, term_did);
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors);
+ error = true;
+ }
+ // now we can take the return type of the given main function
+ expected_return_type = main_fnsig.output();
+ } else {
+ // standard () main return type
+ expected_return_type = ty::Binder::dummy(Ty::new_unit(tcx));
+ }
+
+ if error {
+ return;
+ }
+
+ let se_ty = Ty::new_fn_ptr(
+ tcx,
+ expected_return_type.map_bound(|expected_return_type| {
+ tcx.mk_fn_sig([], expected_return_type, false, hir::Unsafety::Normal, Abi::Rust)
+ }),
+ );
+
+ require_same_types(
+ tcx,
+ &ObligationCause::new(
+ main_span,
+ main_diagnostics_def_id,
+ ObligationCauseCode::MainFunctionType,
+ ),
+ param_env,
+ se_ty,
+ Ty::new_fn_ptr(tcx, main_fnsig),
+ );
+}
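The checks above accept, for example, the following ordinary program: `main` has no generics, where clauses, or `async`, and its return type implements the `Termination` lang item (illustrative only):

```rust
use std::io;

// `Result<(), io::Error>` implements `Termination`, satisfying the
// `register_bound(.., term_did)` obligation, and the final
// `require_same_types` call sees `fn() -> Result<(), io::Error>` on both sides.
fn main() -> Result<(), io::Error> {
    Ok(())
}
```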
+
+fn check_start_fn_ty(tcx: TyCtxt<'_>, start_def_id: DefId) {
+ let start_def_id = start_def_id.expect_local();
+ let start_id = tcx.hir().local_def_id_to_hir_id(start_def_id);
+ let start_span = tcx.def_span(start_def_id);
+ let start_t = tcx.type_of(start_def_id).instantiate_identity();
+ match start_t.kind() {
+ ty::FnDef(..) => {
+ if let Some(Node::Item(it)) = tcx.hir().find(start_id) {
+ if let hir::ItemKind::Fn(sig, generics, _) = &it.kind {
+ let mut error = false;
+ if !generics.params.is_empty() {
+ tcx.sess.emit_err(errors::StartFunctionParameters { span: generics.span });
+ error = true;
+ }
+ if generics.has_where_clause_predicates {
+ tcx.sess.emit_err(errors::StartFunctionWhere {
+ span: generics.where_clause_span,
+ });
+ error = true;
+ }
+ if let hir::IsAsync::Async = sig.header.asyncness {
+ let span = tcx.def_span(it.owner_id);
+ tcx.sess.emit_err(errors::StartAsync { span: span });
+ error = true;
+ }
+
+ let attrs = tcx.hir().attrs(start_id);
+ for attr in attrs {
+ if attr.has_name(sym::track_caller) {
+ tcx.sess.emit_err(errors::StartTrackCaller {
+ span: attr.span,
+ start: start_span,
+ });
+ error = true;
+ }
+ if attr.has_name(sym::target_feature)
+ // Calling functions with `#[target_feature]` is
+ // not unsafe on WASM, see #84988
+ && !tcx.sess.target.is_like_wasm
+ && !tcx.sess.opts.actually_rustdoc
+ {
+ tcx.sess.emit_err(errors::StartTargetFeature {
+ span: attr.span,
+ start: start_span,
+ });
+ error = true;
+ }
+ }
+
+ if error {
+ return;
+ }
+ }
+ }
+
+ let se_ty = Ty::new_fn_ptr(
+ tcx,
+ ty::Binder::dummy(tcx.mk_fn_sig(
+ [tcx.types.isize, Ty::new_imm_ptr(tcx, Ty::new_imm_ptr(tcx, tcx.types.u8))],
+ tcx.types.isize,
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ )),
+ );
+
+ require_same_types(
+ tcx,
+ &ObligationCause::new(
+ start_span,
+ start_def_id,
+ ObligationCauseCode::StartFunctionType,
+ ),
+ ty::ParamEnv::empty(), // start should not have any where bounds.
+ se_ty,
+ Ty::new_fn_ptr(tcx, tcx.fn_sig(start_def_id).instantiate_identity()),
+ );
+ }
+ _ => {
+ span_bug!(start_span, "start has a non-function type: found `{}`", start_t);
+ }
+ }
+}
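For comparison, a `#[start]` item matching the fn-pointer type the check equates against, `fn(isize, *const *const u8) -> isize` (a nightly-only sketch from the era of this change, since `#[start]` is unstable and subject to removal):

```rust
#![feature(start)]

// The argument and return types mirror the signature built above:
// `[isize, *const *const u8] -> isize`, non-variadic, safe, Rust ABI.
#[start]
fn start(_argc: isize, _argv: *const *const u8) -> isize {
    0
}
```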
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 1248f991c..f89e2e5c2 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -60,7 +60,7 @@ fn equate_intrinsic_type<'tcx>(
tcx,
&cause,
ty::ParamEnv::empty(), // FIXME: do all intrinsics have an empty param env?
- Ty::new_fn_ptr(tcx, tcx.fn_sig(it.owner_id).subst_identity()),
+ Ty::new_fn_ptr(tcx, tcx.fn_sig(it.owner_id).instantiate_identity()),
fty,
);
}
@@ -134,7 +134,7 @@ pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: DefId) -> hir
/// Remember to add all intrinsics here, in `compiler/rustc_codegen_llvm/src/intrinsic.rs`,
/// and in `library/core/src/intrinsics.rs`.
pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
- let param = |n| Ty::new_param(tcx, n, Symbol::intern(&format!("P{}", n)));
+ let param = |n| Ty::new_param(tcx, n, Symbol::intern(&format!("P{n}")));
let intrinsic_id = it.owner_id.to_def_id();
let intrinsic_name = tcx.item_name(intrinsic_id);
let name_str = intrinsic_name.as_str();
@@ -155,7 +155,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
ty::INNERMOST,
ty::BoundRegion { var: ty::BoundVar::from_u32(1), kind: ty::BrEnv },
);
- let va_list_ty = tcx.type_of(did).subst(tcx, &[region.into()]);
+ let va_list_ty = tcx.type_of(did).instantiate(tcx, &[region.into()]);
(Ty::new_ref(tcx, env_region, ty::TypeAndMut { ty: va_list_ty, mutbl }), va_list_ty)
})
};
@@ -238,7 +238,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
ty: Ty::new_adt(
tcx,
tcx.adt_def(option_def_id),
- tcx.mk_substs_from_iter([ty::GenericArg::from(p0)].into_iter()),
+ tcx.mk_args_from_iter([ty::GenericArg::from(p0)].into_iter()),
),
mutbl: hir::Mutability::Not,
},
@@ -273,6 +273,10 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
],
Ty::new_unit(tcx),
),
+ sym::compare_bytes => {
+ let byte_ptr = Ty::new_imm_ptr(tcx, tcx.types.u8);
+ (0, vec![byte_ptr, byte_ptr, tcx.types.usize], tcx.types.i32)
+ }
sym::write_bytes | sym::volatile_set_memory => (
1,
vec![
@@ -412,7 +416,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
ty::Region::new_late_bound(tcx, ty::INNERMOST, br),
param(0),
)],
- Ty::new_projection(tcx, discriminant_def_id, tcx.mk_substs(&[param(0).into()])),
+ Ty::new_projection(tcx, discriminant_def_id, tcx.mk_args(&[param(0).into()])),
)
}
@@ -494,7 +498,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
/// Type-check `extern "platform-intrinsic" { ... }` functions.
pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
let param = |n| {
- let name = Symbol::intern(&format!("P{}", n));
+ let name = Symbol::intern(&format!("P{n}"));
Ty::new_param(tcx, n, name)
};
@@ -521,6 +525,10 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
| sym::simd_saturating_sub => (1, vec![param(0), param(0)], param(0)),
sym::simd_arith_offset => (2, vec![param(0), param(1)], param(0)),
sym::simd_neg
+ | sym::simd_bswap
+ | sym::simd_bitreverse
+ | sym::simd_ctlz
+ | sym::simd_cttz
| sym::simd_fsqrt
| sym::simd_fsin
| sym::simd_fcos
@@ -563,20 +571,6 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
| sym::simd_reduce_min_nanless
| sym::simd_reduce_max_nanless => (2, vec![param(0)], param(1)),
sym::simd_shuffle => (3, vec![param(0), param(0), param(1)], param(2)),
- name if name.as_str().starts_with("simd_shuffle") => {
- match name.as_str()["simd_shuffle".len()..].parse() {
- Ok(n) => {
- let params = vec![param(0), param(0), Ty::new_array(tcx, tcx.types.u32, n)];
- (2, params, param(1))
- }
- Err(_) => {
- let msg =
- format!("unrecognized platform-specific intrinsic function: `{name}`");
- tcx.sess.struct_span_err(it.span, msg).emit();
- return;
- }
- }
- }
_ => {
let msg = format!("unrecognized platform-specific intrinsic function: `{name}`");
tcx.sess.struct_span_err(it.span, msg).emit();
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
index 0bb1467ef..945953edd 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
@@ -68,7 +68,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
let asm_ty = match *ty.kind() {
// `!` is allowed for input but not for output (issue #87802)
ty::Never if is_input => return None,
- ty::Error(_) => return None,
+ _ if ty.references_error() => return None,
ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => Some(InlineAsmType::I8),
ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => Some(InlineAsmType::I16),
ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => Some(InlineAsmType::I32),
@@ -81,9 +81,9 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
ty::RawPtr(ty::TypeAndMut { ty, mutbl: _ }) if self.is_thin_ptr_ty(ty) => {
Some(asm_ty_isize)
}
- ty::Adt(adt, substs) if adt.repr().simd() => {
+ ty::Adt(adt, args) if adt.repr().simd() => {
let fields = &adt.non_enum_variant().fields;
- let elem_ty = fields[FieldIdx::from_u32(0)].ty(self.tcx, substs);
+ let elem_ty = fields[FieldIdx::from_u32(0)].ty(self.tcx, args);
let (size, ty) = match elem_ty.kind() {
ty::Array(ty, len) => {
@@ -186,18 +186,14 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
let Some((_, feature)) = supported_tys.iter().find(|&&(t, _)| t == asm_ty) else {
let msg = format!("type `{ty}` cannot be used with this register class");
let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
- let supported_tys: Vec<_> =
- supported_tys.iter().map(|(t, _)| t.to_string()).collect();
+ let supported_tys: Vec<_> = supported_tys.iter().map(|(t, _)| t.to_string()).collect();
err.note(format!(
"register class `{}` supports these types: {}",
reg_class.name(),
supported_tys.join(", "),
));
if let Some(suggest) = reg_class.suggest_class(asm_arch, asm_ty) {
- err.help(format!(
- "consider using the `{}` register class instead",
- suggest.name()
- ));
+ err.help(format!("consider using the `{}` register class instead", suggest.name()));
}
err.emit();
return Some(asm_ty);
@@ -215,7 +211,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
// register class is usable at all.
if let Some(feature) = feature {
if !target_features.contains(feature) {
- let msg = format!("`{}` target feature is not enabled", feature);
+ let msg = format!("`{feature}` target feature is not enabled");
let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
err.note(format!(
"this is required to use type `{}` with register class `{}`",
@@ -427,7 +423,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
// Check that sym actually points to a function. Later passes
// depend on this.
hir::InlineAsmOperand::SymFn { anon_const } => {
- let ty = self.tcx.type_of(anon_const.def_id).subst_identity();
+ let ty = self.tcx.type_of(anon_const.def_id).instantiate_identity();
match ty.kind() {
ty::Never | ty::Error(_) => {}
ty::FnDef(..) => {}
diff --git a/compiler/rustc_hir_analysis/src/check/mod.rs b/compiler/rustc_hir_analysis/src/check/mod.rs
index ce2da7cb1..4cf358732 100644
--- a/compiler/rustc_hir_analysis/src/check/mod.rs
+++ b/compiler/rustc_hir_analysis/src/check/mod.rs
@@ -38,7 +38,7 @@ can be broken down into several distinct phases:
While type checking a function, the intermediate types for the
expressions, blocks, and so forth contained within the function are
-stored in `fcx.node_types` and `fcx.node_substs`. These types
+stored in `fcx.node_types` and `fcx.node_args`. These types
may contain unresolved type variables. After type checking is
complete, the functions in the writeback module are used to take the
types from this table, resolve them, and then write them into their
@@ -65,6 +65,7 @@ a type parameter).
mod check;
mod compare_impl_item;
pub mod dropck;
+mod entry;
pub mod intrinsic;
pub mod intrinsicck;
mod region;
@@ -80,7 +81,7 @@ use rustc_hir::intravisit::Visitor;
use rustc_index::bit_set::BitSet;
use rustc_middle::query::Providers;
use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_middle::ty::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::{GenericArgs, GenericArgsRef};
use rustc_session::parse::feature_err;
use rustc_span::source_map::DUMMY_SP;
use rustc_span::symbol::{kw, Ident};
@@ -188,7 +189,7 @@ fn missing_items_err(
full_impl_span: Span,
) {
let missing_items =
- missing_items.iter().filter(|trait_item| trait_item.opt_rpitit_info.is_none());
+ missing_items.iter().filter(|trait_item| !trait_item.is_impl_trait_in_trait());
let missing_items_msg = missing_items
.clone()
@@ -211,9 +212,9 @@ fn missing_items_err(
let snippet = suggestion_signature(
tcx,
trait_item,
- tcx.impl_trait_ref(impl_def_id).unwrap().subst_identity(),
+ tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity(),
);
- let code = format!("{}{}\n{}", padding, snippet, padding);
+ let code = format!("{padding}{snippet}\n{padding}");
if let Some(span) = tcx.hir().span_if_local(trait_item.def_id) {
missing_trait_item_label
.push(errors::MissingTraitItemLabel { span, item: trait_item.name });
@@ -408,7 +409,7 @@ fn fn_sig_suggestion<'tcx>(
let asyncness = if tcx.asyncness(assoc.def_id).is_async() {
output = if let ty::Alias(_, alias_ty) = *output.kind() {
tcx.explicit_item_bounds(alias_ty.def_id)
- .subst_iter_copied(tcx, alias_ty.substs)
+ .iter_instantiated_copied(tcx, alias_ty.args)
.find_map(|(bound, _)| bound.as_projection_clause()?.no_bound_vars()?.term.ty())
.unwrap_or_else(|| {
span_bug!(
@@ -461,10 +462,10 @@ fn suggestion_signature<'tcx>(
assoc: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) -> String {
- let substs = ty::InternalSubsts::identity_for_item(tcx, assoc.def_id).rebase_onto(
+ let args = ty::GenericArgs::identity_for_item(tcx, assoc.def_id).rebase_onto(
tcx,
assoc.container_id(tcx),
- impl_trait_ref.with_self_ty(tcx, tcx.types.self_param).substs,
+ impl_trait_ref.with_self_ty(tcx, tcx.types.self_param).args,
);
match assoc.kind {
@@ -472,21 +473,21 @@ fn suggestion_signature<'tcx>(
tcx,
tcx.liberate_late_bound_regions(
assoc.def_id,
- tcx.fn_sig(assoc.def_id).subst(tcx, substs),
+ tcx.fn_sig(assoc.def_id).instantiate(tcx, args),
),
assoc.ident(tcx),
- tcx.predicates_of(assoc.def_id).instantiate_own(tcx, substs),
+ tcx.predicates_of(assoc.def_id).instantiate_own(tcx, args),
assoc,
),
ty::AssocKind::Type => {
let (generics, where_clauses) = bounds_from_generic_predicates(
tcx,
- tcx.predicates_of(assoc.def_id).instantiate_own(tcx, substs),
+ tcx.predicates_of(assoc.def_id).instantiate_own(tcx, args),
);
format!("type {}{generics} = /* Type */{where_clauses};", assoc.name)
}
ty::AssocKind::Const => {
- let ty = tcx.type_of(assoc.def_id).subst_identity();
+ let ty = tcx.type_of(assoc.def_id).instantiate_identity();
let val = ty_kind_suggestion(ty).unwrap_or("todo!()");
format!("const {}: {} = {};", assoc.name, ty, val)
}
diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
index d4748b7ef..f5beefc47 100644
--- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs
+++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
@@ -5,7 +5,7 @@ use rustc_ast as ast;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
use rustc_hir as hir;
-use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId};
use rustc_hir::lang_items::LangItem;
use rustc_hir::ItemKind;
use rustc_infer::infer::outlives::env::{OutlivesEnvironment, RegionBoundPairs};
@@ -18,7 +18,7 @@ use rustc_middle::ty::{
self, AdtKind, GenericParamDefKind, ToPredicate, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable,
TypeVisitable, TypeVisitableExt, TypeVisitor,
};
-use rustc_middle::ty::{GenericArgKind, InternalSubsts};
+use rustc_middle::ty::{GenericArgKind, GenericArgs};
use rustc_session::parse::feature_err;
use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
@@ -75,12 +75,10 @@ impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
self.body_def_id,
ObligationCauseCode::WellFormed(loc),
);
- // for a type to be WF, we do not need to check if const trait predicates satisfy.
- let param_env = self.param_env.without_const();
self.ocx.register_obligation(traits::Obligation::new(
self.tcx(),
cause,
- param_env,
+ self.param_env,
ty::Binder::dummy(ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(arg))),
));
}
@@ -196,7 +194,7 @@ fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) {
// We match on both `ty::ImplPolarity` and `ast::ImplPolarity` just to get the `!` span.
match (tcx.impl_polarity(def_id), impl_.polarity) {
(ty::ImplPolarity::Positive, _) => {
- check_impl(tcx, item, impl_.self_ty, &impl_.of_trait, impl_.constness);
+ check_impl(tcx, item, impl_.self_ty, &impl_.of_trait);
}
(ty::ImplPolarity::Negative, ast::ImplPolarity::Negative(span)) => {
// FIXME(#27579): what amount of WF checking do we need for neg impls?
@@ -247,10 +245,14 @@ fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) {
}
// `ForeignItem`s are handled separately.
hir::ItemKind::ForeignMod { .. } => {}
- hir::ItemKind::TyAlias(hir_ty, ..) => {
- if tcx.type_of(item.owner_id.def_id).skip_binder().has_opaque_types() {
- // Bounds are respected for `type X = impl Trait` and `type X = (impl Trait, Y);`
+ hir::ItemKind::TyAlias(hir_ty, ast_generics) => {
+ if tcx.features().lazy_type_alias
+ || tcx.type_of(item.owner_id).skip_binder().has_opaque_types()
+ {
+ // Bounds of lazy type aliases and of eager ones that contain opaque types are respected.
+ // E.g.: `type X = impl Trait;`, `type X = (impl Trait, Y);`.
check_item_type(tcx, def_id, hir_ty.span, UnsizedHandling::Allow);
+ check_variances_for_type_defn(tcx, item, ast_generics);
}
}
_ => {}
@@ -286,11 +288,22 @@ fn check_trait_item(tcx: TyCtxt<'_>, trait_item: &hir::TraitItem<'_>) {
};
check_object_unsafe_self_trait_by_name(tcx, trait_item);
check_associated_item(tcx, def_id, span, method_sig);
+
+ if matches!(trait_item.kind, hir::TraitItemKind::Fn(..)) {
+ for &assoc_ty_def_id in tcx.associated_types_for_impl_traits_in_associated_fn(def_id) {
+ check_associated_item(
+ tcx,
+ assoc_ty_def_id.expect_local(),
+ tcx.def_span(assoc_ty_def_id),
+ None,
+ );
+ }
+ }
}
/// Require that the user writes where clauses on GATs for the implicit
/// outlives bounds involving trait parameters in trait functions and
-/// lifetimes passed as GAT substs. See `self-outlives-lint` test.
+/// lifetimes passed as GAT args. See `self-outlives-lint` test.
///
/// We use the following trait as an example throughout this function:
/// ```rust,ignore (this code fails due to this lint)
@@ -314,7 +327,7 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
for gat_item in associated_items {
let gat_def_id = gat_item.id.owner_id;
let gat_item = tcx.associated_item(gat_def_id);
- // If this item is not an assoc ty, or has no substs, then it's not a GAT
+ // If this item is not an assoc ty, or has no args, then it's not a GAT
if gat_item.kind != ty::AssocKind::Type {
continue;
}
@@ -345,7 +358,7 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
// `Self::Iter<'a>` is a GAT we want to gather any potential missing bounds from.
let sig: ty::FnSig<'_> = tcx.liberate_late_bound_regions(
item_def_id.to_def_id(),
- tcx.fn_sig(item_def_id).subst_identity(),
+ tcx.fn_sig(item_def_id).instantiate_identity(),
);
gather_gat_bounds(
tcx,
@@ -374,7 +387,7 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
param_env,
item_def_id,
tcx.explicit_item_bounds(item_def_id)
- .subst_identity_iter_copied()
+ .instantiate_identity_iter_copied()
.collect::<Vec<_>>(),
&FxIndexSet::default(),
gat_def_id.def_id,
@@ -472,8 +485,7 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
let bound =
if unsatisfied_bounds.len() > 1 { "these bounds are" } else { "this bound is" };
err.note(format!(
- "{} currently required to ensure that impls have maximum flexibility",
- bound
+ "{bound} currently required to ensure that impls have maximum flexibility"
));
err.note(
"we are soliciting feedback, see issue #87479 \
@@ -505,7 +517,7 @@ fn augment_param_env<'tcx>(
);
// FIXME(compiler-errors): Perhaps there is a case where we need to normalize this
// i.e. traits::normalize_param_env_or_error
- ty::ParamEnv::new(bounds, param_env.reveal(), param_env.constness())
+ ty::ParamEnv::new(bounds, param_env.reveal())
}
/// We use the following trait as an example throughout this function.
@@ -544,8 +556,8 @@ fn gather_gat_bounds<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>(
for (region_a, region_a_idx) in &regions {
// Ignore `'static` lifetimes for the purpose of this lint: it's
// because we know it outlives everything and so doesn't give meaningful
- // clues
- if let ty::ReStatic = **region_a {
+        // clues. Also ignore `ReError`, to avoid knock-on errors.
+ if let ty::ReStatic | ty::ReError(_) = **region_a {
continue;
}
// For each region argument (e.g., `'a` in our example), check for a
@@ -588,8 +600,9 @@ fn gather_gat_bounds<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>(
// on the GAT itself.
for (region_b, region_b_idx) in &regions {
// Again, skip `'static` because it outlives everything. Also, we trivially
- // know that a region outlives itself.
- if ty::ReStatic == **region_b || region_a == region_b {
+ // know that a region outlives itself. Also ignore `ReError`, to avoid
+            // knock-on errors.
+ if matches!(**region_b, ty::ReStatic | ty::ReError(_)) || region_a == region_b {
continue;
}
if region_known_to_outlive(
@@ -737,7 +750,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for GATSubstCollector<'tcx> {
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
match t.kind() {
ty::Alias(ty::Projection, p) if p.def_id == self.gat => {
- for (idx, subst) in p.substs.iter().enumerate() {
+ for (idx, subst) in p.args.iter().enumerate() {
match subst.unpack() {
GenericArgKind::Lifetime(lt) if !lt.is_late_bound() => {
self.regions.insert((lt, idx));
@@ -836,7 +849,7 @@ fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) {
// Const parameters are well formed if their type is structural match.
hir::GenericParamKind::Const { ty: hir_ty, default: _ } => {
- let ty = tcx.type_of(param.def_id).subst_identity();
+ let ty = tcx.type_of(param.def_id).instantiate_identity();
if tcx.features().adt_const_params {
enter_wf_checking_ctxt(tcx, hir_ty.span, param.def_id, |wfcx| {
@@ -910,17 +923,17 @@ fn check_associated_item(
let self_ty = match item.container {
ty::TraitContainer => tcx.types.self_param,
- ty::ImplContainer => tcx.type_of(item.container_id(tcx)).subst_identity(),
+ ty::ImplContainer => tcx.type_of(item.container_id(tcx)).instantiate_identity(),
};
match item.kind {
ty::AssocKind::Const => {
- let ty = tcx.type_of(item.def_id).subst_identity();
+ let ty = tcx.type_of(item.def_id).instantiate_identity();
let ty = wfcx.normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
wfcx.register_wf_obligation(span, loc, ty.into());
}
ty::AssocKind::Fn => {
- let sig = tcx.fn_sig(item.def_id).subst_identity();
+ let sig = tcx.fn_sig(item.def_id).instantiate_identity();
let hir_sig = sig_if_method.expect("bad signature for method");
check_fn_or_method(
wfcx,
@@ -936,7 +949,7 @@ fn check_associated_item(
check_associated_type_bounds(wfcx, item, span)
}
if item.defaultness(tcx).has_value() {
- let ty = tcx.type_of(item.def_id).subst_identity();
+ let ty = tcx.type_of(item.def_id).instantiate_identity();
let ty = wfcx.normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
wfcx.register_wf_obligation(span, loc, ty.into());
}
@@ -969,7 +982,11 @@ fn check_type_defn<'tcx>(tcx: TyCtxt<'tcx>, item: &hir::Item<'tcx>, all_sized: b
let field_id = field.did.expect_local();
let hir::FieldDef { ty: hir_ty, .. } =
tcx.hir().get_by_def_id(field_id).expect_field();
- let ty = wfcx.normalize(hir_ty.span, None, tcx.type_of(field.did).subst_identity());
+ let ty = wfcx.normalize(
+ hir_ty.span,
+ None,
+ tcx.type_of(field.did).instantiate_identity(),
+ );
wfcx.register_wf_obligation(
hir_ty.span,
Some(WellFormedLoc::Ty(field_id)),
@@ -981,11 +998,11 @@ fn check_type_defn<'tcx>(tcx: TyCtxt<'tcx>, item: &hir::Item<'tcx>, all_sized: b
// intermediate types must be sized.
let needs_drop_copy = || {
packed && {
- let ty = tcx.type_of(variant.tail().did).subst_identity();
+ let ty = tcx.type_of(variant.tail().did).instantiate_identity();
let ty = tcx.erase_regions(ty);
if ty.has_infer() {
tcx.sess
- .delay_span_bug(item.span, format!("inference variables in {:?}", ty));
+ .delay_span_bug(item.span, format!("inference variables in {ty:?}"));
// Just treat unresolved type expression as if it needs drop.
true
} else {
@@ -1003,7 +1020,11 @@ fn check_type_defn<'tcx>(tcx: TyCtxt<'tcx>, item: &hir::Item<'tcx>, all_sized: b
let field_id = field.did.expect_local();
let hir::FieldDef { ty: hir_ty, .. } =
tcx.hir().get_by_def_id(field_id).expect_field();
- let ty = wfcx.normalize(hir_ty.span, None, tcx.type_of(field.did).subst_identity());
+ let ty = wfcx.normalize(
+ hir_ty.span,
+ None,
+ tcx.type_of(field.did).instantiate_identity(),
+ );
wfcx.register_bound(
traits::ObligationCause::new(
hir_ty.span,
@@ -1083,16 +1104,17 @@ fn check_associated_type_bounds(wfcx: &WfCheckingCtxt<'_, '_>, item: ty::AssocIt
let bounds = wfcx.tcx().explicit_item_bounds(item.def_id);
debug!("check_associated_type_bounds: bounds={:?}", bounds);
- let wf_obligations = bounds.subst_identity_iter_copied().flat_map(|(bound, bound_span)| {
- let normalized_bound = wfcx.normalize(span, None, bound);
- traits::wf::predicate_obligations(
- wfcx.infcx,
- wfcx.param_env,
- wfcx.body_def_id,
- normalized_bound.as_predicate(),
- bound_span,
- )
- });
+ let wf_obligations =
+ bounds.instantiate_identity_iter_copied().flat_map(|(bound, bound_span)| {
+ let normalized_bound = wfcx.normalize(span, None, bound);
+ traits::wf::predicate_obligations(
+ wfcx.infcx,
+ wfcx.param_env,
+ wfcx.body_def_id,
+ normalized_bound.as_predicate(),
+ bound_span,
+ )
+ });
wfcx.register_obligations(wf_obligations);
}
@@ -1105,7 +1127,7 @@ fn check_item_fn(
decl: &hir::FnDecl<'_>,
) {
enter_wf_checking_ctxt(tcx, span, def_id, |wfcx| {
- let sig = tcx.fn_sig(def_id).subst_identity();
+ let sig = tcx.fn_sig(def_id).instantiate_identity();
check_fn_or_method(wfcx, ident.span, sig, decl, def_id);
})
}
@@ -1125,7 +1147,7 @@ fn check_item_type(
debug!("check_item_type: {:?}", item_id);
enter_wf_checking_ctxt(tcx, ty_span, item_id, |wfcx| {
- let ty = tcx.type_of(item_id).subst_identity();
+ let ty = tcx.type_of(item_id).instantiate_identity();
let item_ty = wfcx.normalize(ty_span, Some(WellFormedLoc::Ty(item_id)), ty);
let forbid_unsized = match unsized_handling {
@@ -1170,7 +1192,6 @@ fn check_impl<'tcx>(
item: &'tcx hir::Item<'tcx>,
ast_self_ty: &hir::Ty<'_>,
ast_trait_ref: &Option<hir::TraitRef<'_>>,
- constness: hir::Constness,
) {
enter_wf_checking_ctxt(tcx, item.span, item.owner_id.def_id, |wfcx| {
match ast_trait_ref {
@@ -1178,20 +1199,14 @@ fn check_impl<'tcx>(
// `#[rustc_reservation_impl]` impls are not real impls and
// therefore don't need to be WF (the trait's `Self: Trait` predicate
// won't hold).
- let trait_ref = tcx.impl_trait_ref(item.owner_id).unwrap().subst_identity();
+ let trait_ref = tcx.impl_trait_ref(item.owner_id).unwrap().instantiate_identity();
let trait_ref = wfcx.normalize(
ast_trait_ref.path.span,
Some(WellFormedLoc::Ty(item.hir_id().expect_owner().def_id)),
trait_ref,
);
- let trait_pred = ty::TraitPredicate {
- trait_ref,
- constness: match constness {
- hir::Constness::Const => ty::BoundConstness::ConstIfConst,
- hir::Constness::NotConst => ty::BoundConstness::NotConst,
- },
- polarity: ty::ImplPolarity::Positive,
- };
+ let trait_pred =
+ ty::TraitPredicate { trait_ref, polarity: ty::ImplPolarity::Positive };
let mut obligations = traits::wf::trait_obligations(
wfcx.infcx,
wfcx.param_env,
@@ -1211,7 +1226,7 @@ fn check_impl<'tcx>(
wfcx.register_obligations(obligations);
}
None => {
- let self_ty = tcx.type_of(item.owner_id).subst_identity();
+ let self_ty = tcx.type_of(item.owner_id).instantiate_identity();
let self_ty = wfcx.normalize(
item.span,
Some(WellFormedLoc::Ty(item.hir_id().expect_owner().def_id)),
@@ -1256,7 +1271,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
match param.kind {
GenericParamDefKind::Type { .. } => {
if is_our_default(param) {
- let ty = tcx.type_of(param.def_id).subst_identity();
+ let ty = tcx.type_of(param.def_id).instantiate_identity();
// Ignore dependent defaults -- that is, where the default of one type
// parameter includes another (e.g., `<T, U = T>`). In those cases, we can't
// be sure if it will error or not as user might always specify the other.
@@ -1272,10 +1287,10 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
GenericParamDefKind::Const { .. } => {
if is_our_default(param) {
// FIXME(const_generics_defaults): This
- // is incorrect when dealing with unused substs, for example
+ // is incorrect when dealing with unused args, for example
// for `struct Foo<const N: usize, const M: usize = { 1 - 2 }>`
// we should eagerly error.
- let default_ct = tcx.const_param_default(param.def_id).subst_identity();
+ let default_ct = tcx.const_param_default(param.def_id).instantiate_identity();
if !default_ct.has_param() {
wfcx.register_wf_obligation(
tcx.def_span(param.def_id),
@@ -1298,7 +1313,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
// For more examples see tests `defaults-well-formedness.rs` and `type-check-defaults.rs`.
//
// First we build the defaulted substitution.
- let substs = InternalSubsts::for_item(tcx, def_id.to_def_id(), |param, _| {
+ let args = GenericArgs::for_item(tcx, def_id.to_def_id(), |param, _| {
match param.kind {
GenericParamDefKind::Lifetime => {
// All regions are identity.
@@ -1308,7 +1323,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
GenericParamDefKind::Type { .. } => {
// If the param has a default, ...
if is_our_default(param) {
- let default_ty = tcx.type_of(param.def_id).subst_identity();
+ let default_ty = tcx.type_of(param.def_id).instantiate_identity();
// ... and it's not a dependent default, ...
if !default_ty.has_param() {
// ... then substitute it with the default.
@@ -1321,7 +1336,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
GenericParamDefKind::Const { .. } => {
// If the param has a default, ...
if is_our_default(param) {
- let default_ct = tcx.const_param_default(param.def_id).subst_identity();
+ let default_ct = tcx.const_param_default(param.def_id).instantiate_identity();
// ... and it's not a dependent default, ...
if !default_ct.has_param() {
// ... then substitute it with the default.
@@ -1366,7 +1381,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
}
let mut param_count = CountParams::default();
let has_region = pred.visit_with(&mut param_count).is_break();
- let substituted_pred = ty::EarlyBinder::bind(pred).subst(tcx, substs);
+ let substituted_pred = ty::EarlyBinder::bind(pred).instantiate(tcx, args);
// Don't check non-defaulted params, dependent defaults (including lifetimes)
// or preds with multiple params.
if substituted_pred.has_non_region_param() || param_count.params.len() > 1 || has_region
@@ -1407,7 +1422,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
let wf_obligations = predicates.into_iter().flat_map(|(p, sp)| {
traits::wf::predicate_obligations(
infcx,
- wfcx.param_env.without_const(),
+ wfcx.param_env,
wfcx.body_def_id,
p.as_predicate(),
sp,
@@ -1460,13 +1475,6 @@ fn check_fn_or_method<'tcx>(
check_where_clauses(wfcx, span, def_id);
- check_return_position_impl_trait_in_trait_bounds(
- wfcx,
- def_id,
- sig.output(),
- hir_decl.output.span(),
- );
-
if sig.abi == Abi::RustCall {
let span = tcx.def_span(def_id);
let has_implicit_self = hir_decl.implicit_self != hir::ImplicitSelfKind::None;
@@ -1501,87 +1509,6 @@ fn check_fn_or_method<'tcx>(
}
}
-/// Basically `check_associated_type_bounds`, but separated for now and should be
-/// deduplicated when RPITITs get lowered into real associated items.
-#[tracing::instrument(level = "trace", skip(wfcx))]
-fn check_return_position_impl_trait_in_trait_bounds<'tcx>(
- wfcx: &WfCheckingCtxt<'_, 'tcx>,
- fn_def_id: LocalDefId,
- fn_output: Ty<'tcx>,
- span: Span,
-) {
- let tcx = wfcx.tcx();
- let Some(assoc_item) = tcx.opt_associated_item(fn_def_id.to_def_id()) else {
- return;
- };
- if assoc_item.container != ty::AssocItemContainer::TraitContainer {
- return;
- }
- fn_output.visit_with(&mut ImplTraitInTraitFinder {
- wfcx,
- fn_def_id,
- depth: ty::INNERMOST,
- seen: FxHashSet::default(),
- });
-}
-
-// FIXME(-Zlower-impl-trait-in-trait-to-assoc-ty): Even with the new lowering
-// strategy, we can't just call `check_associated_item` on the new RPITITs,
-// because tests like `tests/ui/async-await/in-trait/implied-bounds.rs` will fail.
-// That's because we need to check that the bounds of the RPITIT hold using
-// the special substs that we create during opaque type lowering, otherwise we're
-// getting a bunch of early bound and free regions mixed up... Haven't looked too
-// deep into this, though.
-struct ImplTraitInTraitFinder<'a, 'tcx> {
- wfcx: &'a WfCheckingCtxt<'a, 'tcx>,
- fn_def_id: LocalDefId,
- depth: ty::DebruijnIndex,
- seen: FxHashSet<DefId>,
-}
-impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ImplTraitInTraitFinder<'_, 'tcx> {
- type BreakTy = !;
-
- fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<!> {
- let tcx = self.wfcx.tcx();
- if let ty::Alias(ty::Opaque, unshifted_opaque_ty) = *ty.kind()
- && self.seen.insert(unshifted_opaque_ty.def_id)
- && let Some(opaque_def_id) = unshifted_opaque_ty.def_id.as_local()
- && let origin = tcx.opaque_type_origin(opaque_def_id)
- && let hir::OpaqueTyOrigin::FnReturn(source) | hir::OpaqueTyOrigin::AsyncFn(source) = origin
- && source == self.fn_def_id
- {
- let opaque_ty = tcx.fold_regions(unshifted_opaque_ty, |re, _depth| {
- match re.kind() {
- ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReError(_) | ty::ReStatic => re,
- r => bug!("unexpected region: {r:?}"),
- }
- });
- for (bound, bound_span) in tcx
- .explicit_item_bounds(opaque_ty.def_id)
- .subst_iter_copied(tcx, opaque_ty.substs)
- {
- let bound = self.wfcx.normalize(bound_span, None, bound);
- self.wfcx.register_obligations(traits::wf::predicate_obligations(
- self.wfcx.infcx,
- self.wfcx.param_env,
- self.wfcx.body_def_id,
- bound.as_predicate(),
- bound_span,
- ));
- // Set the debruijn index back to innermost here, since we already eagerly
- // shifted the substs that we use to generate these bounds. This is unfortunately
- // subtly different behavior than the `ImplTraitInTraitFinder` we use in `param_env`,
- // but that function doesn't actually need to normalize the bound it's visiting
- // (whereas we have to do so here)...
- let old_depth = std::mem::replace(&mut self.depth, ty::INNERMOST);
- bound.visit_with(self);
- self.depth = old_depth;
- }
- }
- ty.super_visit_with(self)
- }
-}
-
const HELP_FOR_SELF_TYPE: &str = "consider changing to `self`, `&self`, `&mut self`, `self: Box<Self>`, \
`self: Rc<Self>`, `self: Arc<Self>`, or `self: Pin<P>` (where P is one \
of the previous types except `Self`)";
@@ -1601,7 +1528,7 @@ fn check_method_receiver<'tcx>(
let span = fn_sig.decl.inputs[0].span;
- let sig = tcx.fn_sig(method.def_id).subst_identity();
+ let sig = tcx.fn_sig(method.def_id).instantiate_identity();
let sig = tcx.liberate_late_bound_regions(method.def_id, sig);
let sig = wfcx.normalize(span, None, sig);
@@ -1773,11 +1700,28 @@ fn check_variances_for_type_defn<'tcx>(
item: &hir::Item<'tcx>,
hir_generics: &hir::Generics<'_>,
) {
- let identity_substs = ty::InternalSubsts::identity_for_item(tcx, item.owner_id);
- for field in tcx.adt_def(item.owner_id).all_fields() {
- if field.ty(tcx, identity_substs).references_error() {
- return;
+ let identity_args = ty::GenericArgs::identity_for_item(tcx, item.owner_id);
+
+ match item.kind {
+ ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => {
+ for field in tcx.adt_def(item.owner_id).all_fields() {
+ if field.ty(tcx, identity_args).references_error() {
+ return;
+ }
+ }
+ }
+ ItemKind::TyAlias(..) => {
+ let ty = tcx.type_of(item.owner_id).instantiate_identity();
+
+ if tcx.features().lazy_type_alias || ty.has_opaque_types() {
+ if ty.references_error() {
+ return;
+ }
+ } else {
+ bug!();
+ }
}
+ _ => bug!(),
}
let ty_predicates = tcx.predicates_of(item.owner_id);
@@ -1854,8 +1798,7 @@ fn report_bivariance(
if matches!(param.kind, hir::GenericParamKind::Type { .. }) && !has_explicit_bounds {
err.help(format!(
- "if you intended `{0}` to be a const parameter, use `const {0}: usize` instead",
- param_name
+ "if you intended `{param_name}` to be a const parameter, use `const {param_name}: usize` instead"
));
}
err.emit()
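(Illustrative sketch, not part of the diff: code that reaches `report_bivariance` and the help message above; the names are hypothetical.)

```rust
// `N` is declared but never used in the body of the type, so it is bivariant
// and rejected (error[E0392]); the help above suggests `const N: usize` when
// the parameter looks like it was meant to be a const parameter.
struct Bytes<N> {
    buf: Vec<u8>,
}
```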
@@ -1911,7 +1854,7 @@ impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
}
}
-fn check_mod_type_wf(tcx: TyCtxt<'_>, module: LocalDefId) {
+fn check_mod_type_wf(tcx: TyCtxt<'_>, module: LocalModDefId) {
let items = tcx.hir_module_items(module);
items.par_items(|item| tcx.ensure().check_well_formed(item.owner_id));
items.par_impl_items(|item| tcx.ensure().check_well_formed(item.owner_id));
diff --git a/compiler/rustc_hir_analysis/src/check_unused.rs b/compiler/rustc_hir_analysis/src/check_unused.rs
index 268b9ac53..9ad73eeff 100644
--- a/compiler/rustc_hir_analysis/src/check_unused.rs
+++ b/compiler/rustc_hir_analysis/src/check_unused.rs
@@ -1,12 +1,21 @@
-use rustc_data_structures::unord::UnordSet;
+use rustc_data_structures::unord::{ExtendUnord, UnordSet};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::LocalDefId;
+use rustc_middle::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint;
-pub fn check_crate(tcx: TyCtxt<'_>) {
- let mut used_trait_imports: UnordSet<LocalDefId> = Default::default();
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { check_unused_traits, ..*providers };
+}
+
+fn check_unused_traits(tcx: TyCtxt<'_>, (): ()) {
+ let mut used_trait_imports = UnordSet::<LocalDefId>::default();
+ // FIXME: Use `tcx.hir().par_body_owners()` when we implement creating `DefId`s
+ // for anon constants during their parents' typeck.
+    // Doing so currently produces query cycle errors because it may typeck
+    // anon constants directly.
for item_def_id in tcx.hir().body_owners() {
let imports = tcx.used_trait_imports(item_def_id);
debug!("GatherVisitor: item_def_id={:?} with imports {:#?}", item_def_id, imports);
@@ -27,7 +36,7 @@ pub fn check_crate(tcx: TyCtxt<'_>) {
}
let (path, _) = item.expect_use();
let msg = if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(path.span) {
- format!("unused import: `{}`", snippet)
+ format!("unused import: `{snippet}`")
} else {
"unused import".to_owned()
};
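(Illustrative sketch, not part of the diff: the situation `check_unused_traits` flags, i.e. a trait brought into scope whose methods are never called, so it never ends up in any body's `used_trait_imports` set.)

```rust
use std::fmt::Write; // warning: unused import: `std::fmt::Write`

fn main() {
    // Nothing here calls `write!` or `write_str`, so the trait import is unused.
    println!("no trait method used");
}
```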
diff --git a/compiler/rustc_hir_analysis/src/coherence/builtin.rs b/compiler/rustc_hir_analysis/src/coherence/builtin.rs
index 79cc43edf..c930537d4 100644
--- a/compiler/rustc_hir_analysis/src/coherence/builtin.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/builtin.rs
@@ -57,7 +57,7 @@ impl<'tcx> Checker<'tcx> {
fn visit_implementation_of_drop(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
// Destructors only work on local ADT types.
- match tcx.type_of(impl_did).subst_identity().kind() {
+ match tcx.type_of(impl_did).instantiate_identity().kind() {
ty::Adt(def, _) if def.did().is_local() => return,
ty::Error(_) => return,
_ => {}
@@ -71,7 +71,7 @@ fn visit_implementation_of_drop(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
debug!("visit_implementation_of_copy: impl_did={:?}", impl_did);
- let self_type = tcx.type_of(impl_did).subst_identity();
+ let self_type = tcx.type_of(impl_did).instantiate_identity();
debug!("visit_implementation_of_copy: self_type={:?} (bound)", self_type);
let param_env = tcx.param_env(impl_did);
@@ -100,7 +100,7 @@ fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
}
fn visit_implementation_of_const_param_ty(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
- let self_type = tcx.type_of(impl_did).subst_identity();
+ let self_type = tcx.type_of(impl_did).instantiate_identity();
assert!(!self_type.has_escaping_bound_vars());
let param_env = tcx.param_env(impl_did);
@@ -139,13 +139,13 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
let dispatch_from_dyn_trait = tcx.require_lang_item(LangItem::DispatchFromDyn, Some(span));
- let source = tcx.type_of(impl_did).subst_identity();
+ let source = tcx.type_of(impl_did).instantiate_identity();
assert!(!source.has_escaping_bound_vars());
let target = {
- let trait_ref = tcx.impl_trait_ref(impl_did).unwrap().subst_identity();
+ let trait_ref = tcx.impl_trait_ref(impl_did).unwrap().instantiate_identity();
assert_eq!(trait_ref.def_id, dispatch_from_dyn_trait);
- trait_ref.substs.type_at(1)
+ trait_ref.args.type_at(1)
};
debug!("visit_implementation_of_dispatch_from_dyn: {:?} -> {:?}", source, target);
@@ -163,9 +163,7 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
if infcx.at(&cause, param_env).eq(DefineOpaqueTypes::No, r_a, *r_b).is_ok()
&& mutbl_a == *mutbl_b => {}
(&RawPtr(tm_a), &RawPtr(tm_b)) if tm_a.mutbl == tm_b.mutbl => (),
- (&Adt(def_a, substs_a), &Adt(def_b, substs_b))
- if def_a.is_struct() && def_b.is_struct() =>
- {
+ (&Adt(def_a, args_a), &Adt(def_b, args_b)) if def_a.is_struct() && def_b.is_struct() => {
if def_a != def_b {
let source_path = tcx.def_path_str(def_a.did());
let target_path = tcx.def_path_str(def_b.did());
@@ -173,8 +171,7 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
create_err(&format!(
"the trait `DispatchFromDyn` may only be implemented \
for a coercion between structures with the same \
- definition; expected `{}`, found `{}`",
- source_path, target_path,
+ definition; expected `{source_path}`, found `{target_path}`",
))
.emit();
@@ -194,8 +191,8 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
let coerced_fields = fields
.iter()
.filter(|field| {
- let ty_a = field.ty(tcx, substs_a);
- let ty_b = field.ty(tcx, substs_b);
+ let ty_a = field.ty(tcx, args_a);
+ let ty_b = field.ty(tcx, args_b);
if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) {
if layout.is_zst() && layout.align.abi.bytes() == 1 {
@@ -250,8 +247,8 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
format!(
"`{}` (`{}` to `{}`)",
field.name,
- field.ty(tcx, substs_a),
- field.ty(tcx, substs_b),
+ field.ty(tcx, args_a),
+ field.ty(tcx, args_b),
)
})
.collect::<Vec<_>>()
@@ -268,7 +265,7 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
ty::TraitRef::new(
tcx,
dispatch_from_dyn_trait,
- [field.ty(tcx, substs_a), field.ty(tcx, substs_b)],
+ [field.ty(tcx, args_a), field.ty(tcx, args_b)],
),
));
}
@@ -300,10 +297,10 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
let unsize_trait = tcx.require_lang_item(LangItem::Unsize, Some(span));
- let source = tcx.type_of(impl_did).subst_identity();
- let trait_ref = tcx.impl_trait_ref(impl_did).unwrap().subst_identity();
+ let source = tcx.type_of(impl_did).instantiate_identity();
+ let trait_ref = tcx.impl_trait_ref(impl_did).unwrap().instantiate_identity();
assert_eq!(trait_ref.def_id, coerce_unsized_trait);
- let target = trait_ref.substs.type_at(1);
+ let target = trait_ref.args.type_at(1);
debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (bound)", source, target);
let param_env = tcx.param_env(impl_did);
@@ -348,7 +345,7 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
check_mutbl(mt_a, mt_b, &|ty| Ty::new_imm_ptr(tcx, ty))
}
- (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b))
+ (&ty::Adt(def_a, args_a), &ty::Adt(def_b, args_b))
if def_a.is_struct() && def_b.is_struct() =>
{
if def_a != def_b {
@@ -411,9 +408,9 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
let diff_fields = fields
.iter_enumerated()
.filter_map(|(i, f)| {
- let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
+ let (a, b) = (f.ty(tcx, args_a), f.ty(tcx, args_b));
- if tcx.type_of(f.did).subst_identity().is_phantom_data() {
+ if tcx.type_of(f.did).instantiate_identity().is_phantom_data() {
// Ignore PhantomData fields
return None;
}
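(For orientation, a sketch of the kind of impls the `DispatchFromDyn` and `CoerceUnsized` checks above accept: a single-field pointer-like wrapper whose field already supports the coercion. Assumes the unstable `unsize`, `coerce_unsized`, and `dispatch_from_dyn` features; `MyPtr` is hypothetical and not part of the diff.)

```rust
#![feature(unsize, coerce_unsized, dispatch_from_dyn)]

use std::marker::Unsize;
use std::ops::{CoerceUnsized, DispatchFromDyn};

// Exactly one non-zero-sized field, and that field (`*const T`) already
// coerces from `*const T` to `*const U`, so both checks succeed.
struct MyPtr<T: ?Sized>(*const T);

impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<MyPtr<U>> for MyPtr<T> {}
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<MyPtr<U>> for MyPtr<T> {}
```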
diff --git a/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs b/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs
index 335590206..a94c75f91 100644
--- a/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs
@@ -148,8 +148,7 @@ impl<'tcx> InherentCollect<'tcx> {
if let ty::Ref(_, subty, _) = ty.kind() {
err.note(format!(
"you could also try moving the reference to \
- uses of `{}` (such as `self`) within the implementation",
- subty
+ uses of `{subty}` (such as `self`) within the implementation"
));
}
err.emit();
@@ -171,7 +170,7 @@ impl<'tcx> InherentCollect<'tcx> {
let id = id.owner_id.def_id;
let item_span = self.tcx.def_span(id);
- let self_ty = self.tcx.type_of(id).subst_identity();
+ let self_ty = self.tcx.type_of(id).instantiate_identity();
match *self_ty.kind() {
ty::Adt(def, _) => self.check_def_id(id, self_ty, def.did()),
ty::Foreign(did) => self.check_def_id(id, self_ty, did),
diff --git a/compiler/rustc_hir_analysis/src/coherence/inherent_impls_overlap.rs b/compiler/rustc_hir_analysis/src/coherence/inherent_impls_overlap.rs
index 3bd293126..7205b7a21 100644
--- a/compiler/rustc_hir_analysis/src/coherence/inherent_impls_overlap.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/inherent_impls_overlap.rs
@@ -77,8 +77,8 @@ impl<'tcx> InherentOverlapChecker<'tcx> {
"duplicate definitions with name `{}`",
ident,
);
- err.span_label(span, format!("duplicate definitions for `{}`", ident));
- err.span_label(*former, format!("other definition for `{}`", ident));
+ err.span_label(span, format!("duplicate definitions for `{ident}`"));
+ err.span_label(*former, format!("other definition for `{ident}`"));
err.emit();
}
@@ -114,11 +114,11 @@ impl<'tcx> InherentOverlapChecker<'tcx> {
);
err.span_label(
self.tcx.def_span(item1.def_id),
- format!("duplicate definitions for `{}`", name),
+ format!("duplicate definitions for `{name}`"),
);
err.span_label(
self.tcx.def_span(item2.def_id),
- format!("other definition for `{}`", name),
+ format!("other definition for `{name}`"),
);
for cause in &overlap.intercrate_ambiguity_causes {
diff --git a/compiler/rustc_hir_analysis/src/coherence/mod.rs b/compiler/rustc_hir_analysis/src/coherence/mod.rs
index 5097f4360..fc8fab0ea 100644
--- a/compiler/rustc_hir_analysis/src/coherence/mod.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/mod.rs
@@ -122,7 +122,7 @@ fn coherent_trait(tcx: TyCtxt<'_>, def_id: DefId) {
let impls = tcx.hir().trait_impls(def_id);
for &impl_def_id in impls {
- let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().subst_identity();
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity();
check_impl(tcx, impl_def_id, trait_ref);
check_object_overlap(tcx, impl_def_id, trait_ref);
diff --git a/compiler/rustc_hir_analysis/src/coherence/orphan.rs b/compiler/rustc_hir_analysis/src/coherence/orphan.rs
index 025bab140..bbdb108c5 100644
--- a/compiler/rustc_hir_analysis/src/coherence/orphan.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/orphan.rs
@@ -5,8 +5,8 @@ use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{struct_span_err, DelayDm};
use rustc_errors::{Diagnostic, ErrorGuaranteed};
use rustc_hir as hir;
-use rustc_middle::ty::subst::InternalSubsts;
use rustc_middle::ty::util::CheckRegions;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{
self, AliasKind, ImplPolarity, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt,
TypeVisitor,
@@ -22,7 +22,7 @@ pub(crate) fn orphan_check_impl(
tcx: TyCtxt<'_>,
impl_def_id: LocalDefId,
) -> Result<(), ErrorGuaranteed> {
- let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().subst_identity();
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity();
trait_ref.error_reported()?;
let ret = do_orphan_check_impl(tcx, trait_ref, impl_def_id);
@@ -352,7 +352,7 @@ fn emit_orphan_check_error<'tcx>(
let this = |name: &str| {
if !trait_ref.def_id.is_local() && !is_target_ty {
- msg("this", &format!(" because this is a foreign trait"))
+ msg("this", " because this is a foreign trait")
} else {
msg("this", &format!(" because {name} are always foreign"))
}
@@ -412,9 +412,8 @@ fn emit_orphan_check_error<'tcx>(
.span_label(
sp,
format!(
- "type parameter `{}` must be covered by another type \
- when it appears before the first local type (`{}`)",
- param_ty, local_type
+ "type parameter `{param_ty}` must be covered by another type \
+ when it appears before the first local type (`{local_type}`)"
),
)
.note(
@@ -441,9 +440,8 @@ fn emit_orphan_check_error<'tcx>(
.span_label(
sp,
format!(
- "type parameter `{}` must be used as the type parameter for some \
+ "type parameter `{param_ty}` must be used as the type parameter for some \
local type",
- param_ty,
),
)
.note(
@@ -488,10 +486,10 @@ fn lint_auto_trait_impl<'tcx>(
trait_ref: ty::TraitRef<'tcx>,
impl_def_id: LocalDefId,
) {
- assert_eq!(trait_ref.substs.len(), 1);
+ assert_eq!(trait_ref.args.len(), 1);
let self_ty = trait_ref.self_ty();
- let (self_type_did, substs) = match self_ty.kind() {
- ty::Adt(def, substs) => (def.did(), substs),
+ let (self_type_did, args) = match self_ty.kind() {
+ ty::Adt(def, args) => (def.did(), args),
_ => {
// FIXME: should also lint for stuff like `&i32` but
// considering that auto traits are unstable, that
@@ -502,9 +500,9 @@ fn lint_auto_trait_impl<'tcx>(
};
// Impls which completely cover a given root type are fine as they
- // disable auto impls entirely. So only lint if the substs
- // are not a permutation of the identity substs.
- let Err(arg) = tcx.uses_unique_generic_params(substs, CheckRegions::No) else {
+ // disable auto impls entirely. So only lint if the args
+ // are not a permutation of the identity args.
+ let Err(arg) = tcx.uses_unique_generic_params(args, CheckRegions::No) else {
// ok
return;
};
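(Illustrative sketch, not part of the diff: the case the `lint_auto_trait_impl` path above warns about, where the impl's self type does not use the ADT's own identity parameters and therefore does not disable the auto impl for the whole root type.)

```rust
struct Foo<T>(T);

// `*const T` is not a generic parameter of `Foo`, so this impl only covers
// part of the root type and triggers the `suspicious_auto_trait_impls` lint.
unsafe impl<T> Send for Foo<*const T> {}
```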
@@ -541,17 +539,16 @@ fn lint_auto_trait_impl<'tcx>(
let self_descr = tcx.def_descr(self_type_did);
match arg {
ty::util::NotUniqueParam::DuplicateParam(arg) => {
- lint.note(format!("`{}` is mentioned multiple times", arg));
+ lint.note(format!("`{arg}` is mentioned multiple times"));
}
ty::util::NotUniqueParam::NotParam(arg) => {
- lint.note(format!("`{}` is not a generic parameter", arg));
+ lint.note(format!("`{arg}` is not a generic parameter"));
}
}
lint.span_note(
item_span,
format!(
- "try using the same sequence of generic parameters as the {} definition",
- self_descr,
+ "try using the same sequence of generic parameters as the {self_descr} definition",
),
)
},
@@ -568,10 +565,10 @@ fn fast_reject_auto_impl<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, self_ty:
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for DisableAutoTraitVisitor<'tcx> {
type BreakTy = ();
- fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
let tcx = self.tcx;
- if t != self.self_ty_root {
- for impl_def_id in tcx.non_blanket_impls_for_ty(self.trait_def_id, t) {
+ if ty != self.self_ty_root {
+ for impl_def_id in tcx.non_blanket_impls_for_ty(self.trait_def_id, ty) {
match tcx.impl_polarity(impl_def_id) {
ImplPolarity::Negative => return ControlFlow::Break(()),
ImplPolarity::Reservation => {}
@@ -584,30 +581,28 @@ fn fast_reject_auto_impl<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, self_ty:
}
}
- match t.kind() {
- ty::Adt(def, substs) if def.is_phantom_data() => substs.visit_with(self),
- ty::Adt(def, substs) => {
+ match ty.kind() {
+ ty::Adt(def, args) if def.is_phantom_data() => args.visit_with(self),
+ ty::Adt(def, args) => {
// @lcnr: This is the only place where cycles can happen. We avoid this
// by only visiting each `DefId` once.
//
            // This will be incorrect in subtle cases, but I don't care :)
if self.seen.insert(def.did()) {
- for ty in def.all_fields().map(|field| field.ty(tcx, substs)) {
+ for ty in def.all_fields().map(|field| field.ty(tcx, args)) {
ty.visit_with(self)?;
}
}
ControlFlow::Continue(())
}
- _ => t.super_visit_with(self),
+ _ => ty.super_visit_with(self),
}
}
}
let self_ty_root = match self_ty.kind() {
- ty::Adt(def, _) => {
- Ty::new_adt(tcx, *def, InternalSubsts::identity_for_item(tcx, def.did()))
- }
+ ty::Adt(def, _) => Ty::new_adt(tcx, *def, GenericArgs::identity_for_item(tcx, def.did())),
_ => unimplemented!("unexpected self ty {:?}", self_ty),
};
diff --git a/compiler/rustc_hir_analysis/src/coherence/unsafety.rs b/compiler/rustc_hir_analysis/src/coherence/unsafety.rs
index c6b161713..6b18b0ebe 100644
--- a/compiler/rustc_hir_analysis/src/coherence/unsafety.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/unsafety.rs
@@ -12,7 +12,7 @@ pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) {
let impl_ = item.expect_impl();
if let Some(trait_ref) = tcx.impl_trait_ref(item.owner_id) {
- let trait_ref = trait_ref.subst_identity();
+ let trait_ref = trait_ref.instantiate_identity();
let trait_def = tcx.trait_def(trait_ref.def_id);
let unsafe_attr =
impl_.generics.params.iter().find(|p| p.pure_wrt_drop).map(|_| "may_dangle");
diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs
index f47df4f21..7b9f61d7a 100644
--- a/compiler/rustc_hir_analysis/src/collect.rs
+++ b/compiler/rustc_hir_analysis/src/collect.rs
@@ -22,7 +22,7 @@ use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::{Applicability, DiagnosticBuilder, ErrorGuaranteed, StashKey};
use rustc_hir as hir;
-use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId};
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{GenericParamKind, Node};
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
@@ -48,7 +48,7 @@ mod type_of;
///////////////////////////////////////////////////////////////////////////
// Main entry point
-fn collect_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+fn collect_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
tcx.hir().visit_item_likes_in_module(module_def_id, &mut CollectItemTypesVisitor { tcx });
}
@@ -195,9 +195,9 @@ pub(crate) fn placeholder_type_error_diag<'tcx>(
sugg.push((arg.span, (*type_name).to_string()));
} else if let Some(span) = generics.span_for_param_suggestion() {
// Account for bounds, we want `fn foo<T: E, K>(_: K)` not `fn foo<T, K: E>(_: K)`.
- sugg.push((span, format!(", {}", type_name)));
+ sugg.push((span, format!(", {type_name}")));
} else {
- sugg.push((generics.span, format!("<{}>", type_name)));
+ sugg.push((generics.span, format!("<{type_name}>")));
}
}
@@ -329,7 +329,7 @@ fn bad_placeholder<'tcx>(
mut spans: Vec<Span>,
kind: &'static str,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let kind = if kind.ends_with('s') { format!("{}es", kind) } else { format!("{}s", kind) };
+ let kind = if kind.ends_with('s') { format!("{kind}es") } else { format!("{kind}s") };
spans.sort();
tcx.sess.create_err(errors::PlaceholderNotAllowedItemSignatures { spans, kind })
@@ -401,13 +401,13 @@ impl<'tcx> AstConv<'tcx> for ItemCtxt<'tcx> {
poly_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Ty<'tcx> {
if let Some(trait_ref) = poly_trait_ref.no_bound_vars() {
- let item_substs = self.astconv().create_substs_for_associated_item(
+ let item_args = self.astconv().create_args_for_associated_item(
span,
item_def_id,
item_segment,
- trait_ref.substs,
+ trait_ref.args,
);
- Ty::new_projection(self.tcx(), item_def_id, item_substs)
+ Ty::new_projection(self.tcx(), item_def_id, item_args)
} else {
// There are no late-bound regions; we can just ignore the binder.
let (mut mpart_sugg, mut inferred_sugg) = (None, None);
@@ -425,10 +425,8 @@ impl<'tcx> AstConv<'tcx> for ItemCtxt<'tcx> {
| hir::ItemKind::Union(_, generics) => {
let lt_name = get_new_lifetime_name(self.tcx, poly_trait_ref, generics);
let (lt_sp, sugg) = match generics.params {
- [] => (generics.span, format!("<{}>", lt_name)),
- [bound, ..] => {
- (bound.span.shrink_to_lo(), format!("{}, ", lt_name))
- }
+ [] => (generics.span, format!("<{lt_name}>")),
+ [bound, ..] => (bound.span.shrink_to_lo(), format!("{lt_name}, ")),
};
mpart_sugg = Some(errors::AssociatedTypeTraitUninferredGenericParamsMultipartSuggestion {
fspan: lt_sp,
@@ -1027,7 +1025,7 @@ fn trait_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::TraitDef {
} else {
tcx.sess.span_err(
meta.span(),
- format!("unknown meta item passed to `rustc_deny_explicit_impl` {:?}", meta),
+ format!("unknown meta item passed to `rustc_deny_explicit_impl` {meta:?}"),
);
}
}
@@ -1145,8 +1143,8 @@ fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<ty::PolyFnSig<
}
Ctor(data) | Variant(hir::Variant { data, .. }) if data.ctor().is_some() => {
- let ty = tcx.type_of(tcx.hir().get_parent_item(hir_id)).subst_identity();
- let inputs = data.fields().iter().map(|f| tcx.type_of(f.def_id).subst_identity());
+ let ty = tcx.type_of(tcx.hir().get_parent_item(hir_id)).instantiate_identity();
+ let inputs = data.fields().iter().map(|f| tcx.type_of(f.def_id).instantiate_identity());
ty::Binder::dummy(tcx.mk_fn_sig(
inputs,
ty,
@@ -1161,15 +1159,13 @@ fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<ty::PolyFnSig<
// signatures and cannot be accessed through `fn_sig`. For
// example, a closure signature excludes the `self`
// argument. In any case they are embedded within the
- // closure type as part of the `ClosureSubsts`.
+ // closure type as part of the `ClosureArgs`.
//
// To get the signature of a closure, you should use the
- // `sig` method on the `ClosureSubsts`:
+ // `sig` method on the `ClosureArgs`:
//
- // substs.as_closure().sig(def_id, tcx)
- bug!(
- "to get the signature of a closure, use `substs.as_closure().sig()` not `fn_sig()`",
- );
+ // args.as_closure().sig(def_id, tcx)
+            bug!("to get the signature of a closure, use `args.as_closure().sig()` not `fn_sig()`");
}
x => {
@@ -1266,7 +1262,7 @@ fn suggest_impl_trait<'tcx>(
) -> Option<String> {
let format_as_assoc: fn(_, _, _, _, _) -> _ =
|tcx: TyCtxt<'tcx>,
- _: ty::SubstsRef<'tcx>,
+ _: ty::GenericArgsRef<'tcx>,
trait_def_id: DefId,
assoc_item_def_id: DefId,
item_ty: Ty<'tcx>| {
@@ -1276,13 +1272,15 @@ fn suggest_impl_trait<'tcx>(
};
let format_as_parenthesized: fn(_, _, _, _, _) -> _ =
|tcx: TyCtxt<'tcx>,
- substs: ty::SubstsRef<'tcx>,
+ args: ty::GenericArgsRef<'tcx>,
trait_def_id: DefId,
_: DefId,
item_ty: Ty<'tcx>| {
let trait_name = tcx.item_name(trait_def_id);
- let args_tuple = substs.type_at(1);
- let ty::Tuple(types) = *args_tuple.kind() else { return None; };
+ let args_tuple = args.type_at(1);
+ let ty::Tuple(types) = *args_tuple.kind() else {
+ return None;
+ };
let types = types.make_suggestable(tcx, false)?;
let maybe_ret =
if item_ty.is_unit() { String::new() } else { format!(" -> {item_ty}") };
@@ -1315,31 +1313,34 @@ fn suggest_impl_trait<'tcx>(
format_as_parenthesized,
),
] {
- let Some(trait_def_id) = trait_def_id else { continue; };
- let Some(assoc_item_def_id) = assoc_item_def_id else { continue; };
+ let Some(trait_def_id) = trait_def_id else {
+ continue;
+ };
+ let Some(assoc_item_def_id) = assoc_item_def_id else {
+ continue;
+ };
if tcx.def_kind(assoc_item_def_id) != DefKind::AssocTy {
continue;
}
let param_env = tcx.param_env(def_id);
let infcx = tcx.infer_ctxt().build();
- let substs = ty::InternalSubsts::for_item(tcx, trait_def_id, |param, _| {
+ let args = ty::GenericArgs::for_item(tcx, trait_def_id, |param, _| {
if param.index == 0 { ret_ty.into() } else { infcx.var_for_def(span, param) }
});
- if !infcx.type_implements_trait(trait_def_id, substs, param_env).must_apply_modulo_regions()
- {
+ if !infcx.type_implements_trait(trait_def_id, args, param_env).must_apply_modulo_regions() {
continue;
}
let ocx = ObligationCtxt::new(&infcx);
let item_ty = ocx.normalize(
&ObligationCause::misc(span, def_id),
param_env,
- Ty::new_projection(tcx, assoc_item_def_id, substs),
+ Ty::new_projection(tcx, assoc_item_def_id, args),
);
// FIXME(compiler-errors): We may benefit from resolving regions here.
if ocx.select_where_possible().is_empty()
&& let item_ty = infcx.resolve_vars_if_possible(item_ty)
&& let Some(item_ty) = item_ty.make_suggestable(tcx, false)
- && let Some(sugg) = formatter(tcx, infcx.resolve_vars_if_possible(substs), trait_def_id, assoc_item_def_id, item_ty)
+ && let Some(sugg) = formatter(tcx, infcx.resolve_vars_if_possible(args), trait_def_id, assoc_item_def_id, item_ty)
{
return Some(sugg);
}
@@ -1357,39 +1358,61 @@ fn impl_trait_ref(
.of_trait
.as_ref()
.map(|ast_trait_ref| {
- let selfty = tcx.type_of(def_id).subst_identity();
- icx.astconv().instantiate_mono_trait_ref(
- ast_trait_ref,
- selfty,
- check_impl_constness(tcx, impl_.constness, ast_trait_ref),
- )
+ let selfty = tcx.type_of(def_id).instantiate_identity();
+
+ if let Some(ErrorGuaranteed { .. }) = check_impl_constness(
+ tcx,
+ tcx.is_const_trait_impl_raw(def_id.to_def_id()),
+ &ast_trait_ref,
+ ) {
+ // we have a const impl, but for a trait without `#[const_trait]`, so
+ // without the host param. If we continue with the HIR trait ref, we get
+ // ICEs for generic arg count mismatch. We do a little HIR editing to
+ // make astconv happy.
+ let mut path_segments = ast_trait_ref.path.segments.to_vec();
+ let last_segment = path_segments.len() - 1;
+ let mut args = path_segments[last_segment].args().clone();
+ let last_arg = args.args.len() - 1;
+ assert!(matches!(args.args[last_arg], hir::GenericArg::Const(anon_const) if tcx.has_attr(anon_const.value.def_id, sym::rustc_host)));
+ args.args = &args.args[..args.args.len() - 1];
+ path_segments[last_segment].args = Some(&args);
+ let path = hir::Path {
+ span: ast_trait_ref.path.span,
+ res: ast_trait_ref.path.res,
+ segments: &path_segments,
+ };
+ let trait_ref = hir::TraitRef { path: &path, hir_ref_id: ast_trait_ref.hir_ref_id };
+ icx.astconv().instantiate_mono_trait_ref(&trait_ref, selfty)
+ } else {
+ icx.astconv().instantiate_mono_trait_ref(&ast_trait_ref, selfty)
+ }
})
.map(ty::EarlyBinder::bind)
}
fn check_impl_constness(
tcx: TyCtxt<'_>,
- constness: hir::Constness,
+ is_const: bool,
ast_trait_ref: &hir::TraitRef<'_>,
-) -> ty::BoundConstness {
- match constness {
- hir::Constness::Const => {
- if let Some(trait_def_id) = ast_trait_ref.trait_def_id() && !tcx.has_attr(trait_def_id, sym::const_trait) {
- let trait_name = tcx.item_name(trait_def_id).to_string();
- tcx.sess.emit_err(errors::ConstImplForNonConstTrait {
- trait_ref_span: ast_trait_ref.path.span,
- trait_name,
- local_trait_span: trait_def_id.as_local().map(|_| tcx.def_span(trait_def_id).shrink_to_lo()),
- marking: (),
- adding: (),
- });
- ty::BoundConstness::NotConst
- } else {
- ty::BoundConstness::ConstIfConst
- }
- },
- hir::Constness::NotConst => ty::BoundConstness::NotConst,
+) -> Option<ErrorGuaranteed> {
+ if !is_const {
+ return None;
+ }
+
+ let trait_def_id = ast_trait_ref.trait_def_id()?;
+ if tcx.has_attr(trait_def_id, sym::const_trait) {
+ return None;
}
+
+ let trait_name = tcx.item_name(trait_def_id).to_string();
+ Some(tcx.sess.emit_err(errors::ConstImplForNonConstTrait {
+ trait_ref_span: ast_trait_ref.path.span,
+ trait_name,
+ local_trait_span:
+ trait_def_id.as_local().map(|_| tcx.def_span(trait_def_id).shrink_to_lo()),
+ marking: (),
+ adding: (),
+ }))
}
fn impl_polarity(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::ImplPolarity {
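(Illustrative sketch, not part of the diff: what `check_impl_constness` now reports, and what the HIR editing above works around, namely a `const impl` of a trait that is not annotated with `#[const_trait]`. Assumes the unstable `const_trait_impl` feature of this era; the item names are hypothetical.)

```rust
#![feature(const_trait_impl)]

trait Ordinary {} // not marked `#[const_trait]`

struct Unit;

// error: const `impl` for trait `Ordinary` which is not marked with `#[const_trait]`
impl const Ordinary for Unit {}
```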
@@ -1502,7 +1525,7 @@ fn compute_sig_of_foreign_fn_decl<'tcx>(
.sess
.source_map()
.span_to_snippet(ast_ty.span)
- .map_or_else(|_| String::new(), |s| format!(" `{}`", s));
+ .map_or_else(|_| String::new(), |s| format!(" `{s}`"));
tcx.sess.emit_err(errors::SIMDFFIHighlyExperimental { span: ast_ty.span, snip });
}
};
diff --git a/compiler/rustc_hir_analysis/src/collect/generics_of.rs b/compiler/rustc_hir_analysis/src/collect/generics_of.rs
index ccc9f8084..484200827 100644
--- a/compiler/rustc_hir_analysis/src/collect/generics_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/generics_of.rs
@@ -68,17 +68,17 @@ pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Generics {
// ^ parent_def_id
//
// then we only want to return generics for params to the left of `N`. If we don't do that we
- // end up with that const looking like: `ty::ConstKind::Unevaluated(def_id, substs: [N#0])`.
+ // end up with that const looking like: `ty::ConstKind::Unevaluated(def_id, args: [N#0])`.
//
- // This causes ICEs (#86580) when building the substs for Foo in `fn foo() -> Foo { .. }` as
- // we substitute the defaults with the partially built substs when we build the substs. Subst'ing
- // the `N#0` on the unevaluated const indexes into the empty substs we're in the process of building.
+ // This causes ICEs (#86580) when building the args for Foo in `fn foo() -> Foo { .. }` as
+ // we substitute the defaults with the partially built args when we build the args. Subst'ing
+ // the `N#0` on the unevaluated const indexes into the empty args we're in the process of building.
//
// We fix this by having this function return the parent's generics ourselves and truncating the
// generics to only include non-forward declared params (with the exception of the `Self` ty)
//
- // For the above code example that means we want `substs: []`
- // For the following struct def we want `substs: [N#0]` when generics_of is called on
+ // For the above code example that means we want `args: []`
+ // For the following struct def we want `args: [N#0]` when generics_of is called on
// the def id of the `{ N + 1 }` anon const
// struct Foo<const N: usize, const M: usize = { N + 1 }>;
//
@@ -93,7 +93,7 @@ pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Generics {
return ty::Generics {
// we set the parent of these generics to be our parent's parent so that we
- // dont end up with substs: [N, M, N] for the const default on a struct like this:
+    // don't end up with args: [N, M, N] for the const default on a struct like this:
// struct Foo<const N: usize, const M: usize = { ... }>;
parent: generics.parent,
parent_count: generics.parent_count,
@@ -209,6 +209,7 @@ pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Generics {
| ItemKind::Struct(..)
| ItemKind::OpaqueTy(..)
| ItemKind::Union(..) => (None, Defaults::Allowed),
+ ItemKind::Const(..) => (None, Defaults::Deny),
_ => (None, Defaults::FutureCompatDisallowed),
}
}
@@ -319,7 +320,7 @@ pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Generics {
bug!("parent also has host effect param? index: {idx}, def: {def_id:?}");
}
- host_effect_index = Some(parent_count + index as usize);
+ host_effect_index = Some(index as usize);
}
Some(ty::GenericParamDef {
diff --git a/compiler/rustc_hir_analysis/src/collect/item_bounds.rs b/compiler/rustc_hir_analysis/src/collect/item_bounds.rs
index 57f74172e..4b7743fae 100644
--- a/compiler/rustc_hir_analysis/src/collect/item_bounds.rs
+++ b/compiler/rustc_hir_analysis/src/collect/item_bounds.rs
@@ -2,15 +2,15 @@ use super::ItemCtxt;
use crate::astconv::{AstConv, PredicateFilter};
use rustc_hir as hir;
use rustc_infer::traits::util;
-use rustc_middle::ty::subst::InternalSubsts;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::GenericArgs;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeFolder};
use rustc_span::def_id::{DefId, LocalDefId};
use rustc_span::Span;
/// For associated types we include both bounds written on the type
/// (`type X: Trait`) and predicates from the trait: `where Self::X: Trait`.
///
-/// Note that this filtering is done with the items identity substs to
+/// Note that this filtering is done with the items identity args to
/// simplify checking that these bounds are met in impls. This means that
/// a bound such as `for<'b> <Self as X<'b>>::U: Clone` can't be used, as in
/// `hr-associated-type-bound-1.rs`.
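(Both sources of bounds mentioned in the doc comment above, sketched on hypothetical traits; not part of the diff.)

```rust
trait Container {
    // Bound written directly on the associated type: `type X: Trait`.
    type Item: Clone;
}

// Predicate coming from the trait itself: `where Self::X: Trait`.
trait Store
where
    Self::Data: Default,
{
    type Data;
}
```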
@@ -23,7 +23,7 @@ fn associated_type_bounds<'tcx>(
let item_ty = Ty::new_projection(
tcx,
assoc_item_def_id.to_def_id(),
- InternalSubsts::identity_for_item(tcx, assoc_item_def_id),
+ GenericArgs::identity_for_item(tcx, assoc_item_def_id),
);
let icx = ItemCtxt::new(tcx, assoc_item_def_id);
@@ -95,7 +95,7 @@ pub(super) fn explicit_item_bounds(
Ty::new_projection(
tcx,
def_id.to_def_id(),
- ty::InternalSubsts::identity_for_item(tcx, def_id),
+ ty::GenericArgs::identity_for_item(tcx, def_id),
),
item.span,
));
@@ -113,18 +113,35 @@ pub(super) fn explicit_item_bounds(
..
}) => associated_type_bounds(tcx, def_id, bounds, *span),
hir::Node::Item(hir::Item {
- kind: hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, in_trait, .. }),
+ kind: hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, in_trait: false, .. }),
span,
..
}) => {
- let substs = InternalSubsts::identity_for_item(tcx, def_id);
- let item_ty = if *in_trait && !tcx.lower_impl_trait_in_trait_to_assoc_ty() {
- Ty::new_projection(tcx, def_id.to_def_id(), substs)
- } else {
- Ty::new_opaque(tcx, def_id.to_def_id(), substs)
- };
+ let args = GenericArgs::identity_for_item(tcx, def_id);
+ let item_ty = Ty::new_opaque(tcx, def_id.to_def_id(), args);
opaque_type_bounds(tcx, def_id, bounds, item_ty, *span)
}
+ // Since RPITITs are astconv'd as projections in `ast_ty_to_ty`, when we're asking
+ // for the item bounds of the *opaques* in a trait's default method signature, we
+ // need to map these projections back to opaques.
+ hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, in_trait: true, origin, .. }),
+ span,
+ ..
+ }) => {
+ let (hir::OpaqueTyOrigin::FnReturn(fn_def_id)
+ | hir::OpaqueTyOrigin::AsyncFn(fn_def_id)) = *origin
+ else {
+ bug!()
+ };
+ let args = GenericArgs::identity_for_item(tcx, def_id);
+ let item_ty = Ty::new_opaque(tcx, def_id.to_def_id(), args);
+ tcx.arena.alloc_slice(
+ &opaque_type_bounds(tcx, def_id, bounds, item_ty, *span)
+ .to_vec()
+ .fold_with(&mut AssocTyToOpaque { tcx, fn_def_id: fn_def_id.to_def_id() }),
+ )
+ }
hir::Node::Item(hir::Item { kind: hir::ItemKind::TyAlias(..), .. }) => &[],
_ => bug!("item_bounds called on {:?}", def_id),
};
@@ -139,3 +156,26 @@ pub(super) fn item_bounds(
tcx.mk_clauses_from_iter(util::elaborate(tcx, bounds.iter().map(|&(bound, _span)| bound)))
})
}
+
+struct AssocTyToOpaque<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ fn_def_id: DefId,
+}
+
+impl<'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTyToOpaque<'tcx> {
+ fn interner(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if let ty::Alias(ty::Projection, projection_ty) = ty.kind()
+ && let Some(ty::ImplTraitInTraitData::Trait { fn_def_id, .. })
+ = self.tcx.opt_rpitit_info(projection_ty.def_id)
+ && fn_def_id == self.fn_def_id
+ {
+ self.tcx.type_of(projection_ty.def_id).instantiate(self.tcx, projection_ty.args)
+ } else {
+ ty
+ }
+ }
+}
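(An illustrative trait for the projection-to-opaque mapping above: a default body forces us to ask for the item bounds of the opaque behind the RPITIT. Assumes the unstable `return_position_impl_trait_in_trait` feature of this era; not part of the diff.)

```rust
#![feature(return_position_impl_trait_in_trait)]

trait Greeter {
    // The `Display` bound lives on the opaque; within this default body the
    // return type is seen as a projection and must be folded back to the opaque.
    fn greeting(&self) -> impl std::fmt::Display {
        "hello"
    }
}
```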
diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
index 129366641..495e66366 100644
--- a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
@@ -2,17 +2,16 @@ use crate::astconv::{AstConv, OnlySelfBounds, PredicateFilter};
use crate::bounds::Bounds;
use crate::collect::ItemCtxt;
use crate::constrained_generic_params as cgp;
-use hir::{HirId, Lifetime, Node};
+use hir::{HirId, Node};
use rustc_data_structures::fx::FxIndexSet;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit::{self, Visitor};
-use rustc_middle::ty::subst::InternalSubsts;
use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_middle::ty::{GenericPredicates, Generics, ImplTraitInTraitData, ToPredicate};
-use rustc_span::symbol::{sym, Ident};
-use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_middle::ty::{GenericPredicates, ImplTraitInTraitData, ToPredicate};
+use rustc_span::symbol::Ident;
+use rustc_span::{Span, DUMMY_SP};
/// Returns a list of all type predicates (explicit and implicit) for the definition with
/// ID `def_id`. This includes all predicates returned by `predicates_defined_on`, plus
@@ -38,17 +37,10 @@ pub(super) fn predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredic
// from the trait itself that *shouldn't* be shown as the source of
// an obligation and instead be skipped. Otherwise we'd use
// `tcx.def_span(def_id);`
-
- let constness = if tcx.has_attr(def_id, sym::const_trait) {
- ty::BoundConstness::ConstIfConst
- } else {
- ty::BoundConstness::NotConst
- };
-
let span = rustc_span::DUMMY_SP;
result.predicates =
tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(std::iter::once((
- ty::TraitRef::identity(tcx, def_id).with_constness(constness).to_predicate(tcx),
+ ty::TraitRef::identity(tcx, def_id).to_predicate(tcx),
span,
))));
}
@@ -63,33 +55,25 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
use rustc_hir::*;
match tcx.opt_rpitit_info(def_id.to_def_id()) {
- Some(ImplTraitInTraitData::Trait { opaque_def_id, fn_def_id }) => {
- let opaque_ty_id = tcx.hir().local_def_id_to_hir_id(opaque_def_id.expect_local());
- let opaque_ty_node = tcx.hir().get(opaque_ty_id);
- let Node::Item(&Item { kind: ItemKind::OpaqueTy(OpaqueTy { lifetime_mapping, .. }), .. }) = opaque_ty_node else {
- bug!("unexpected {opaque_ty_node:?}")
- };
-
+ Some(ImplTraitInTraitData::Trait { fn_def_id, .. }) => {
let mut predicates = Vec::new();
// RPITITs should inherit the predicates of their parent. This is
// both to ensure that the RPITITs are only instantiated when the
// parent predicates would hold, and also so that the param-env
// inherits these predicates as assumptions.
- let identity_substs = InternalSubsts::identity_for_item(tcx, def_id);
- predicates.extend(
- tcx.explicit_predicates_of(fn_def_id).instantiate_own(tcx, identity_substs),
- );
+ let identity_args = ty::GenericArgs::identity_for_item(tcx, def_id);
+ predicates
+ .extend(tcx.explicit_predicates_of(fn_def_id).instantiate_own(tcx, identity_args));
// We also install bidirectional outlives predicates for the RPITIT
        // to keep the duplicated lifetimes from opaque lowering in sync.
+ // We only need to compute bidirectional outlives for the duplicated
+ // opaque lifetimes, which explains the slicing below.
compute_bidirectional_outlives_predicates(
tcx,
- def_id,
- lifetime_mapping.iter().map(|(lifetime, def_id)| {
- (*lifetime, (*def_id, lifetime.ident.name, lifetime.ident.span))
- }),
- tcx.generics_of(def_id.to_def_id()),
+ &tcx.generics_of(def_id.to_def_id()).params
+ [tcx.generics_of(fn_def_id).params.len()..],
&mut predicates,
);
@@ -104,15 +88,15 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
let trait_assoc_predicates =
tcx.explicit_predicates_of(assoc_item.trait_item_def_id.unwrap());
- let impl_assoc_identity_substs = InternalSubsts::identity_for_item(tcx, def_id);
+ let impl_assoc_identity_args = ty::GenericArgs::identity_for_item(tcx, def_id);
let impl_def_id = tcx.parent(fn_def_id);
- let impl_trait_ref_substs =
- tcx.impl_trait_ref(impl_def_id).unwrap().subst_identity().substs;
+ let impl_trait_ref_args =
+ tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity().args;
- let impl_assoc_substs =
- impl_assoc_identity_substs.rebase_onto(tcx, impl_def_id, impl_trait_ref_substs);
+ let impl_assoc_args =
+ impl_assoc_identity_args.rebase_onto(tcx, impl_def_id, impl_trait_ref_args);
- let impl_predicates = trait_assoc_predicates.instantiate_own(tcx, impl_assoc_substs);
+ let impl_predicates = trait_assoc_predicates.instantiate_own(tcx, impl_assoc_args);
return ty::GenericPredicates {
parent: Some(impl_def_id),
@@ -146,13 +130,15 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
Node::Item(item) => match item.kind {
ItemKind::Impl(impl_) => {
if impl_.defaultness.is_default() {
- is_default_impl_trait =
- tcx.impl_trait_ref(def_id).map(|t| ty::Binder::dummy(t.subst_identity()));
+ is_default_impl_trait = tcx
+ .impl_trait_ref(def_id)
+ .map(|t| ty::Binder::dummy(t.instantiate_identity()));
}
impl_.generics
}
ItemKind::Fn(.., generics, _)
| ItemKind::TyAlias(_, generics)
+ | ItemKind::Const(_, generics, _)
| ItemKind::Enum(_, generics)
| ItemKind::Struct(_, generics)
| ItemKind::Union(_, generics) => generics,
@@ -200,7 +186,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
// (see below). Recall that a default impl is not itself an impl, but rather a
// set of defaults that can be incorporated into another impl.
if let Some(trait_ref) = is_default_impl_trait {
- predicates.insert((trait_ref.without_const().to_predicate(tcx), tcx.def_span(def_id)));
+ predicates.insert((trait_ref.to_predicate(tcx), tcx.def_span(def_id)));
}
// Collect the region predicates that were declared inline as
@@ -333,8 +319,8 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
// in trait checking. See `setup_constraining_predicates`
// for details.
if let Node::Item(&Item { kind: ItemKind::Impl { .. }, .. }) = node {
- let self_ty = tcx.type_of(def_id).subst_identity();
- let trait_ref = tcx.impl_trait_ref(def_id).map(ty::EarlyBinder::subst_identity);
+ let self_ty = tcx.type_of(def_id).instantiate_identity();
+ let trait_ref = tcx.impl_trait_ref(def_id).map(ty::EarlyBinder::instantiate_identity);
cgp::setup_constraining_predicates(
tcx,
&mut predicates,
@@ -354,21 +340,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
};
debug!(?lifetimes);
- let lifetime_mapping = std::iter::zip(lifetimes, ast_generics.params)
- .map(|(arg, dup)| {
- let hir::GenericArg::Lifetime(arg) = arg else { bug!() };
- (**arg, dup)
- })
- .filter(|(_, dup)| matches!(dup.kind, hir::GenericParamKind::Lifetime { .. }))
- .map(|(lifetime, dup)| (lifetime, (dup.def_id, dup.name.ident().name, dup.span)));
-
- compute_bidirectional_outlives_predicates(
- tcx,
- def_id,
- lifetime_mapping,
- generics,
- &mut predicates,
- );
+ compute_bidirectional_outlives_predicates(tcx, &generics.params, &mut predicates);
debug!(?predicates);
}
@@ -382,39 +354,28 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
/// enforce that these lifetimes stay in sync.
fn compute_bidirectional_outlives_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
- item_def_id: LocalDefId,
- lifetime_mapping: impl Iterator<Item = (Lifetime, (LocalDefId, Symbol, Span))>,
- generics: &Generics,
+ opaque_own_params: &[ty::GenericParamDef],
predicates: &mut Vec<(ty::Clause<'tcx>, Span)>,
) {
- let icx = ItemCtxt::new(tcx, item_def_id);
-
- for (arg, (dup_def, name, span)) in lifetime_mapping {
- let orig_region = icx.astconv().ast_region_to_region(&arg, None);
- if !matches!(orig_region.kind(), ty::ReEarlyBound(..)) {
- // There is no late-bound lifetime to actually match up here, since the lifetime doesn't
- // show up in the opaque's parent's substs.
- continue;
+ for param in opaque_own_params {
+ let orig_lifetime = tcx.map_rpit_lifetime_to_fn_lifetime(param.def_id.expect_local());
+ if let ty::ReEarlyBound(..) = *orig_lifetime {
+ let dup_lifetime = ty::Region::new_early_bound(
+ tcx,
+ ty::EarlyBoundRegion { def_id: param.def_id, index: param.index, name: param.name },
+ );
+ let span = tcx.def_span(param.def_id);
+ predicates.push((
+ ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate(orig_lifetime, dup_lifetime))
+ .to_predicate(tcx),
+ span,
+ ));
+ predicates.push((
+ ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate(dup_lifetime, orig_lifetime))
+ .to_predicate(tcx),
+ span,
+ ));
}
-
- let Some(dup_index) = generics.param_def_id_to_index(icx.tcx, dup_def.to_def_id()) else { bug!() };
-
- let dup_region = ty::Region::new_early_bound(
- tcx,
- ty::EarlyBoundRegion { def_id: dup_def.to_def_id(), index: dup_index, name },
- );
-
- predicates.push((
- ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate(orig_region, dup_region))
- .to_predicate(tcx),
- span,
- ));
-
- predicates.push((
- ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate(dup_region, orig_region))
- .to_predicate(tcx),
- span,
- ));
}
}
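The rewritten helper still emits the same pair of predicates per duplicated lifetime; two mutual outlives constraints are how region equality is expressed. A standalone illustration (hypothetical function, not rustc internals):

```rust
// Two mutual outlives bounds force `'orig` and `'dup` to denote the same
// region, which is exactly what the pushed predicate pair encodes.
fn same_region<'orig, 'dup>(x: &'orig str) -> &'dup str
where
    'orig: 'dup,
    'dup: 'orig,
{
    x
}

fn main() {
    let s = String::from("pinned");
    println!("{}", same_region(&s));
}
```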
@@ -493,20 +454,20 @@ pub(super) fn explicit_predicates_of<'tcx>(
// Remove bounds on associated types from the predicates, they will be
// returned by `explicit_item_bounds`.
let predicates_and_bounds = tcx.trait_explicit_predicates_and_bounds(def_id);
- let trait_identity_substs = InternalSubsts::identity_for_item(tcx, def_id);
+ let trait_identity_args = ty::GenericArgs::identity_for_item(tcx, def_id);
let is_assoc_item_ty = |ty: Ty<'tcx>| {
// For a predicate from a where clause to become a bound on an
// associated type:
- // * It must use the identity substs of the item.
+ // * It must use the identity args of the item.
// * We're in the scope of the trait, so we can't name any
// parameters of the GAT. That means that all we need to
- // check are that the substs of the projection are the
- // identity substs of the trait.
+ // check are that the args of the projection are the
+ // identity args of the trait.
// * It must be an associated type for this trait (*not* a
// supertrait).
if let ty::Alias(ty::Projection, projection) = ty.kind() {
- projection.substs == trait_identity_substs
+ projection.args == trait_identity_args
// FIXME(return_type_notation): This check should be more robust
&& !tcx.is_impl_trait_in_trait(projection.def_id)
&& tcx.associated_item(projection.def_id).container_id(tcx)
@@ -757,6 +718,7 @@ pub(super) fn type_param_predicates(
ItemKind::Fn(.., generics, _)
| ItemKind::Impl(&hir::Impl { generics, .. })
| ItemKind::TyAlias(_, generics)
+ | ItemKind::Const(_, generics, _)
| ItemKind::OpaqueTy(&OpaqueTy {
generics,
origin: hir::OpaqueTyOrigin::TyAlias { .. },
@@ -770,8 +732,7 @@ pub(super) fn type_param_predicates(
if param_id == item_hir_id {
let identity_trait_ref =
ty::TraitRef::identity(tcx, item_def_id.to_def_id());
- extend =
- Some((identity_trait_ref.without_const().to_predicate(tcx), item.span));
+ extend = Some((identity_trait_ref.to_predicate(tcx), item.span));
}
generics
}
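A concrete case of the `is_assoc_item_ty` filter shown above (trait and names invented for illustration): a trait where-clause on `Self::Assoc` uses the trait's identity args, so it is pulled out of `explicit_predicates_of` and surfaces via `explicit_item_bounds` on the associated type instead.

```rust
// Hedged sketch; `Container`/`Assoc`/`Wrapper` are invented names.
trait Container
where
    // `Self::Assoc` is a projection whose args are exactly the trait's
    // identity args, so this bound becomes an item bound on `Assoc`
    // rather than a predicate of `Container` itself.
    Self::Assoc: Clone,
{
    type Assoc;
}

struct Wrapper;

impl Container for Wrapper {
    type Assoc = String; // must satisfy the `Clone` item bound
}

fn main() {
    let _x: <Wrapper as Container>::Assoc = String::new();
}
```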
diff --git a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
index acd0bcd8e..6dd0c840d 100644
--- a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
+++ b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
@@ -137,12 +137,6 @@ enum Scope<'a> {
s: ScopeRef<'a>,
},
- /// A scope which either determines unspecified lifetimes or errors
- /// on them (e.g., due to ambiguity).
- Elision {
- s: ScopeRef<'a>,
- },
-
/// Use a specific lifetime (if `Some`) or leave it unset (to be
/// inferred in a function body or potentially error outside one),
/// for the default choice of lifetime in a trait object type.
@@ -211,7 +205,6 @@ impl<'a> fmt::Debug for TruncatedScopeDebug<'a> {
Scope::Body { id, s: _ } => {
f.debug_struct("Body").field("id", id).field("s", &"..").finish()
}
- Scope::Elision { s: _ } => f.debug_struct("Elision").field("s", &"..").finish(),
Scope::ObjectLifetimeDefault { lifetime, s: _ } => f
.debug_struct("ObjectLifetimeDefault")
.field("lifetime", lifetime)
@@ -325,9 +318,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
break (vec![], BinderScopeType::Normal);
}
- Scope::Elision { s, .. }
- | Scope::ObjectLifetimeDefault { s, .. }
- | Scope::AnonConstBoundary { s } => {
+ Scope::ObjectLifetimeDefault { s, .. } | Scope::AnonConstBoundary { s } => {
scope = s;
}
@@ -526,16 +517,11 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
| hir::ItemKind::Macro(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::ForeignMod { .. }
+ | hir::ItemKind::Static(..)
| hir::ItemKind::GlobalAsm(..) => {
// These sorts of items have no lifetime parameters at all.
intravisit::walk_item(self, item);
}
- hir::ItemKind::Static(..) | hir::ItemKind::Const(..) => {
- // No lifetime parameters, but implied 'static.
- self.with(Scope::Elision { s: self.scope }, |this| {
- intravisit::walk_item(this, item)
- });
- }
hir::ItemKind::OpaqueTy(hir::OpaqueTy {
origin: hir::OpaqueTyOrigin::TyAlias { .. },
..
@@ -596,6 +582,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
})
}
hir::ItemKind::TyAlias(_, generics)
+ | hir::ItemKind::Const(_, generics, _)
| hir::ItemKind::Enum(_, generics)
| hir::ItemKind::Struct(_, generics)
| hir::ItemKind::Union(_, generics)
@@ -603,21 +590,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
| hir::ItemKind::TraitAlias(generics, ..)
| hir::ItemKind::Impl(&hir::Impl { generics, .. }) => {
// These kinds of items have only early-bound lifetime parameters.
- let bound_vars = generics.params.iter().map(ResolvedArg::early).collect();
- self.record_late_bound_vars(item.hir_id(), vec![]);
- let scope = Scope::Binder {
- hir_id: item.hir_id(),
- bound_vars,
- scope_type: BinderScopeType::Normal,
- s: self.scope,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- let scope = Scope::TraitRefBoundary { s: this.scope };
- this.with(scope, |this| {
- intravisit::walk_item(this, item);
- });
- });
+ self.visit_early(item.hir_id(), generics, |this| intravisit::walk_item(this, item));
}
}
}
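As background for the `visit_early`/`visit_early_late` split used throughout this file: type-level items only ever have early-bound lifetime parameters, while a lifetime that appears only in a function signature is late-bound. Illustrative code, not rustc internals:

```rust
// Early-bound: part of the item's generics, substituted whenever the item is named.
struct OnlyEarly<'a>(&'a str);

// Late-bound: `'a` appears only in the signature, so it is bound by the
// function's own binder and chosen afresh at each call site.
fn late<'a>(x: &'a str) -> &'a str {
    x
}

fn main() {
    let s = String::from("hi");
    let early = OnlyEarly(&s);
    println!("{} {}", early.0, late(&s));
}
```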
@@ -727,12 +700,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
// Elided lifetimes are not allowed in non-return
// position impl Trait
let scope = Scope::TraitRefBoundary { s: self.scope };
- self.with(scope, |this| {
- let scope = Scope::Elision { s: this.scope };
- this.with(scope, |this| {
- intravisit::walk_item(this, opaque_ty);
- })
- });
+ self.with(scope, |this| intravisit::walk_item(this, opaque_ty));
return;
}
@@ -749,9 +717,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
// `fn foo<'a>() -> MyAnonTy<'a> { ... }`
// ^ ^this gets resolved in the current scope
for lifetime in lifetimes {
- let hir::GenericArg::Lifetime(lifetime) = lifetime else {
- continue
- };
+ let hir::GenericArg::Lifetime(lifetime) = lifetime else { continue };
self.visit_lifetime(lifetime);
// Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
@@ -759,12 +725,8 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
// well-supported at the moment, so this doesn't work.
// In the future, this should be fixed and this error should be removed.
let def = self.map.defs.get(&lifetime.hir_id).cloned();
- let Some(ResolvedArg::LateBound(_, _, def_id)) = def else {
- continue
- };
- let Some(def_id) = def_id.as_local() else {
- continue
- };
+ let Some(ResolvedArg::LateBound(_, _, def_id)) = def else { continue };
+ let Some(def_id) = def_id.as_local() else { continue };
let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
// Ensure that the parent of the def is an item, not HRTB
let parent_id = self.tcx.hir().parent_id(hir_id);
@@ -801,39 +763,24 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
use self::hir::TraitItemKind::*;
match trait_item.kind {
Fn(_, _) => {
- self.visit_early_late(trait_item.hir_id(), &trait_item.generics, |this| {
+ self.visit_early_late(trait_item.hir_id(), trait_item.generics, |this| {
intravisit::walk_trait_item(this, trait_item)
});
}
Type(bounds, ty) => {
- let generics = &trait_item.generics;
- let bound_vars = generics.params.iter().map(ResolvedArg::early).collect();
- self.record_late_bound_vars(trait_item.hir_id(), vec![]);
- let scope = Scope::Binder {
- hir_id: trait_item.hir_id(),
- bound_vars,
- s: self.scope,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- let scope = Scope::TraitRefBoundary { s: this.scope };
- this.with(scope, |this| {
- this.visit_generics(generics);
- for bound in bounds {
- this.visit_param_bound(bound);
- }
- if let Some(ty) = ty {
- this.visit_ty(ty);
- }
- })
- });
- }
- Const(_, _) => {
- // Only methods and types support generics.
- assert!(trait_item.generics.params.is_empty());
- intravisit::walk_trait_item(self, trait_item);
+ self.visit_early(trait_item.hir_id(), trait_item.generics, |this| {
+ this.visit_generics(&trait_item.generics);
+ for bound in bounds {
+ this.visit_param_bound(bound);
+ }
+ if let Some(ty) = ty {
+ this.visit_ty(ty);
+ }
+ })
}
+ Const(_, _) => self.visit_early(trait_item.hir_id(), trait_item.generics, |this| {
+ intravisit::walk_trait_item(this, trait_item)
+ }),
}
}
@@ -841,34 +788,16 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
use self::hir::ImplItemKind::*;
match impl_item.kind {
- Fn(..) => self.visit_early_late(impl_item.hir_id(), &impl_item.generics, |this| {
+ Fn(..) => self.visit_early_late(impl_item.hir_id(), impl_item.generics, |this| {
+ intravisit::walk_impl_item(this, impl_item)
+ }),
+ Type(ty) => self.visit_early(impl_item.hir_id(), impl_item.generics, |this| {
+ this.visit_generics(impl_item.generics);
+ this.visit_ty(ty);
+ }),
+ Const(_, _) => self.visit_early(impl_item.hir_id(), impl_item.generics, |this| {
intravisit::walk_impl_item(this, impl_item)
}),
- Type(ty) => {
- let generics = &impl_item.generics;
- let bound_vars: FxIndexMap<LocalDefId, ResolvedArg> =
- generics.params.iter().map(ResolvedArg::early).collect();
- self.record_late_bound_vars(impl_item.hir_id(), vec![]);
- let scope = Scope::Binder {
- hir_id: impl_item.hir_id(),
- bound_vars,
- s: self.scope,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- let scope = Scope::TraitRefBoundary { s: this.scope };
- this.with(scope, |this| {
- this.visit_generics(generics);
- this.visit_ty(ty);
- })
- });
- }
- Const(_, _) => {
- // Only methods and types support generics.
- assert!(impl_item.generics.params.is_empty());
- intravisit::walk_impl_item(self, impl_item);
- }
}
}
@@ -1204,6 +1133,25 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
self.with(scope, walk);
}
+ fn visit_early<F>(&mut self, hir_id: hir::HirId, generics: &'tcx hir::Generics<'tcx>, walk: F)
+ where
+ F: for<'b, 'c> FnOnce(&'b mut BoundVarContext<'c, 'tcx>),
+ {
+ let bound_vars = generics.params.iter().map(ResolvedArg::early).collect();
+ self.record_late_bound_vars(hir_id, vec![]);
+ let scope = Scope::Binder {
+ hir_id,
+ bound_vars,
+ s: self.scope,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ let scope = Scope::TraitRefBoundary { s: this.scope };
+ this.with(scope, walk)
+ });
+ }
+
#[instrument(level = "debug", skip(self))]
fn resolve_lifetime_ref(
&mut self,
@@ -1299,8 +1247,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
scope = s;
}
- Scope::Elision { s, .. }
- | Scope::ObjectLifetimeDefault { s, .. }
+ Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
| Scope::AnonConstBoundary { s } => {
@@ -1363,7 +1310,6 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
Scope::Root { .. } => break,
Scope::Binder { s, .. }
| Scope::Body { s, .. }
- | Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
@@ -1415,8 +1361,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
scope = s;
}
- Scope::Elision { s, .. }
- | Scope::ObjectLifetimeDefault { s, .. }
+ Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. } => {
scope = s;
@@ -1489,7 +1434,6 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
Scope::Root { .. } => break,
Scope::Binder { s, .. }
| Scope::Body { s, .. }
- | Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
@@ -1536,7 +1480,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
DefKind::Struct
| DefKind::Union
| DefKind::Enum
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::Trait,
def_id,
) if depth == 0 => Some(def_id),
@@ -1570,7 +1514,6 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
Scope::Body { .. } => break true,
Scope::Binder { s, .. }
- | Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
@@ -1727,7 +1670,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
},
));
bound_vars
- .extend(self.tcx.fn_sig(assoc_fn.def_id).subst_identity().bound_vars());
+ .extend(self.tcx.fn_sig(assoc_fn.def_id).instantiate_identity().bound_vars());
bound_vars
} else {
self.tcx.sess.delay_span_bug(
@@ -1838,14 +1781,20 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
output: Option<&'tcx hir::Ty<'tcx>>,
in_closure: bool,
) {
- self.with(Scope::Elision { s: self.scope }, |this| {
- for input in inputs {
- this.visit_ty(input);
- }
- if !in_closure && let Some(output) = output {
- this.visit_ty(output);
- }
- });
+ self.with(
+ Scope::ObjectLifetimeDefault {
+ lifetime: Some(ResolvedArg::StaticLifetime),
+ s: self.scope,
+ },
+ |this| {
+ for input in inputs {
+ this.visit_ty(input);
+ }
+ if !in_closure && let Some(output) = output {
+ this.visit_ty(output);
+ }
+ },
+ );
if in_closure && let Some(output) = output {
self.visit_ty(output);
}
@@ -1865,7 +1814,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
scope = s;
}
- Scope::Root { .. } | Scope::Elision { .. } => break ResolvedArg::StaticLifetime,
+ Scope::Root { .. } => break ResolvedArg::StaticLifetime,
Scope::Body { .. } | Scope::ObjectLifetimeDefault { lifetime: None, .. } => return,
@@ -2041,15 +1990,15 @@ fn is_late_bound_map(
hir::TyKind::Path(hir::QPath::Resolved(
None,
- hir::Path { res: Res::Def(DefKind::TyAlias, alias_def), segments, span },
+ hir::Path { res: Res::Def(DefKind::TyAlias { .. }, alias_def), segments, span },
)) => {
// See comments on `ConstrainedCollectorPostAstConv` for why this arm does not just consider
- // substs to be unconstrained.
+ // args to be unconstrained.
let generics = self.tcx.generics_of(alias_def);
let mut walker = ConstrainedCollectorPostAstConv {
arg_is_constrained: vec![false; generics.params.len()].into_boxed_slice(),
};
- walker.visit_ty(self.tcx.type_of(alias_def).subst_identity());
+ walker.visit_ty(self.tcx.type_of(alias_def).instantiate_identity());
match segments.last() {
Some(hir::PathSegment { args: Some(args), .. }) => {
@@ -2063,8 +2012,7 @@ fn is_late_bound_map(
tcx.sess.delay_span_bug(
*span,
format!(
- "Incorrect generic arg count for alias {:?}",
- alias_def
+ "Incorrect generic arg count for alias {alias_def:?}"
),
);
None
diff --git a/compiler/rustc_hir_analysis/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs
index 3755342ae..2bbdbe3a1 100644
--- a/compiler/rustc_hir_analysis/src/collect/type_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs
@@ -3,7 +3,6 @@ use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::HirId;
use rustc_middle::ty::print::with_forced_trimmed_paths;
-use rustc_middle::ty::subst::InternalSubsts;
use rustc_middle::ty::util::IntTypeExt;
use rustc_middle::ty::{self, ImplTraitInTraitData, IsSuggestable, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::symbol::Ident;
@@ -157,7 +156,7 @@ fn anon_const_type_of<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Ty<'tcx> {
let Some(type_dependent_def) = tables.type_dependent_def_id(parent_node_id) else {
return Ty::new_error_with_message(tcx,
tcx.def_span(def_id),
- format!("unable to find type-dependent def for {:?}", parent_node_id),
+ format!("unable to find type-dependent def for {parent_node_id:?}"),
);
};
let idx = segment
@@ -198,14 +197,14 @@ fn anon_const_type_of<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Ty<'tcx> {
} else {
return Ty::new_error_with_message(tcx,
tcx.def_span(def_id),
- format!("unable to find const parent for {} in pat {:?}", hir_id, pat),
+ format!("unable to find const parent for {hir_id} in pat {pat:?}"),
);
}
}
_ => {
return Ty::new_error_with_message(tcx,
tcx.def_span(def_id),
- format!("unexpected const parent path {:?}", parent_node),
+ format!("unexpected const parent path {parent_node:?}"),
);
}
};
@@ -338,8 +337,8 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty
let output = match tcx.hir().get(hir_id) {
Node::TraitItem(item) => match item.kind {
TraitItemKind::Fn(..) => {
- let substs = InternalSubsts::identity_for_item(tcx, def_id);
- Ty::new_fn_def(tcx, def_id.to_def_id(), substs)
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id);
+ Ty::new_fn_def(tcx, def_id.to_def_id(), args)
}
TraitItemKind::Const(ty, body_id) => body_id
.and_then(|body_id| {
@@ -363,8 +362,8 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty
Node::ImplItem(item) => match item.kind {
ImplItemKind::Fn(..) => {
- let substs = InternalSubsts::identity_for_item(tcx, def_id);
- Ty::new_fn_def(tcx, def_id.to_def_id(), substs)
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id);
+ Ty::new_fn_def(tcx, def_id.to_def_id(), args)
}
ImplItemKind::Const(ty, body_id) => {
if is_suggestable_infer_ty(ty) {
@@ -405,7 +404,7 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty
icx.to_ty(ty)
}
}
- ItemKind::Const(ty, body_id) => {
+ ItemKind::Const(ty, _, body_id) => {
if is_suggestable_infer_ty(ty) {
infer_placeholder_type(
tcx, def_id, body_id, ty.span, item.ident, "constant",
@@ -426,13 +425,13 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty
_ => icx.to_ty(*self_ty),
},
ItemKind::Fn(..) => {
- let substs = InternalSubsts::identity_for_item(tcx, def_id);
- Ty::new_fn_def(tcx, def_id.to_def_id(), substs)
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id);
+ Ty::new_fn_def(tcx, def_id.to_def_id(), args)
}
ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => {
let def = tcx.adt_def(def_id);
- let substs = InternalSubsts::identity_for_item(tcx, def_id);
- Ty::new_adt(tcx, def, substs)
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id);
+ Ty::new_adt(tcx, def, args)
}
ItemKind::OpaqueTy(OpaqueTy {
origin: hir::OpaqueTyOrigin::TyAlias { .. },
@@ -472,8 +471,8 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty
Node::ForeignItem(foreign_item) => match foreign_item.kind {
ForeignItemKind::Fn(..) => {
- let substs = InternalSubsts::identity_for_item(tcx, def_id);
- Ty::new_fn_def(tcx, def_id.to_def_id(), substs)
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id);
+ Ty::new_fn_def(tcx, def_id.to_def_id(), args)
}
ForeignItemKind::Static(t, _) => icx.to_ty(t),
ForeignItemKind::Type => Ty::new_foreign(tcx, def_id.to_def_id()),
@@ -481,11 +480,11 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty
Node::Ctor(def) | Node::Variant(Variant { data: def, .. }) => match def {
VariantData::Unit(..) | VariantData::Struct(..) => {
- tcx.type_of(tcx.hir().get_parent_item(hir_id)).subst_identity()
+ tcx.type_of(tcx.hir().get_parent_item(hir_id)).instantiate_identity()
}
VariantData::Tuple(..) => {
- let substs = InternalSubsts::identity_for_item(tcx, def_id);
- Ty::new_fn_def(tcx, def_id.to_def_id(), substs)
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id);
+ Ty::new_fn_def(tcx, def_id.to_def_id(), args)
}
},
@@ -498,8 +497,8 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty
Node::AnonConst(_) => anon_const_type_of(tcx, def_id),
Node::ConstBlock(_) => {
- let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
- substs.as_inline_const().ty()
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id.to_def_id());
+ args.as_inline_const().ty()
}
Node::GenericParam(param) => match &param.kind {
@@ -545,7 +544,7 @@ fn infer_placeholder_type<'a>(
if let Some(ty) = ty.make_suggestable(tcx, false) {
err.span_suggestion(
span,
- format!("provide a type for the {item}", item = kind),
+ format!("provide a type for the {kind}"),
format!("{colon} {ty}"),
Applicability::MachineApplicable,
);
diff --git a/compiler/rustc_hir_analysis/src/constrained_generic_params.rs b/compiler/rustc_hir_analysis/src/constrained_generic_params.rs
index 35882ad35..5591fa6f2 100644
--- a/compiler/rustc_hir_analysis/src/constrained_generic_params.rs
+++ b/compiler/rustc_hir_analysis/src/constrained_generic_params.rs
@@ -59,7 +59,7 @@ struct ParameterCollector {
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ParameterCollector {
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
match *t.kind() {
- ty::Alias(ty::Projection | ty::Inherent, ..) if !self.include_nonconstraining => {
+ ty::Alias(..) if !self.include_nonconstraining => {
// projections are not injective
return ControlFlow::Continue(());
}
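Widening the arm to all `ty::Alias` keeps the original rationale: aliases are not injective, so a parameter reachable only through one is not considered constrained. A hedged illustration with invented traits:

```rust
// Two different `T`s map to the same projected type, so knowing the
// projection tells you nothing about `T`; it cannot constrain it.
trait Project {
    type Assoc;
}

impl Project for u8 {
    type Assoc = ();
}

impl Project for u16 {
    type Assoc = ();
}

fn main() {
    // Both projections are `()`; the parameter is not recoverable from them.
    let _a: <u8 as Project>::Assoc = ();
    let _b: <u16 as Project>::Assoc = ();
}
```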
diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs
index c2d2e5f7e..9471ad9ca 100644
--- a/compiler/rustc_hir_analysis/src/errors.rs
+++ b/compiler/rustc_hir_analysis/src/errors.rs
@@ -216,7 +216,7 @@ impl<'a> IntoDiagnostic<'a> for MissingTypeParams {
"parameters",
self.missing_type_params
.iter()
- .map(|n| format!("`{}`", n))
+ .map(|n| format!("`{n}`"))
.collect::<Vec<_>>()
.join(", "),
);
@@ -918,3 +918,12 @@ pub struct UnusedAssociatedTypeBounds {
#[suggestion(code = "")]
pub span: Span,
}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_assoc_bound_on_const)]
+#[note]
+pub struct AssocBoundOnConst {
+ #[primary_span]
+ pub span: Span,
+ pub descr: &'static str,
+}
diff --git a/compiler/rustc_hir_analysis/src/hir_wf_check.rs b/compiler/rustc_hir_analysis/src/hir_wf_check.rs
index f1765174d..ca7679cfb 100644
--- a/compiler/rustc_hir_analysis/src/hir_wf_check.rs
+++ b/compiler/rustc_hir_analysis/src/hir_wf_check.rs
@@ -130,7 +130,7 @@ fn diagnostic_hir_wf_check<'tcx>(
hir::Node::Item(item) => match item.kind {
hir::ItemKind::TyAlias(ty, _)
| hir::ItemKind::Static(ty, _, _)
- | hir::ItemKind::Const(ty, _) => vec![ty],
+ | hir::ItemKind::Const(ty, _, _) => vec![ty],
hir::ItemKind::Impl(impl_) => match &impl_.of_trait {
Some(t) => t
.path
diff --git a/compiler/rustc_hir_analysis/src/impl_wf_check.rs b/compiler/rustc_hir_analysis/src/impl_wf_check.rs
index 5526dd4b0..788121f7a 100644
--- a/compiler/rustc_hir_analysis/src/impl_wf_check.rs
+++ b/compiler/rustc_hir_analysis/src/impl_wf_check.rs
@@ -14,7 +14,7 @@ use min_specialization::check_min_specialization;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::struct_span_err;
use rustc_hir::def::DefKind;
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::{LocalDefId, LocalModDefId};
use rustc_middle::query::Providers;
use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
use rustc_span::{Span, Symbol};
@@ -51,7 +51,7 @@ mod min_specialization;
/// impl<'a> Trait<Foo> for Bar { type X = &'a i32; }
/// // ^ 'a is unused and appears in assoc type, error
/// ```
-fn check_mod_impl_wf(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+fn check_mod_impl_wf(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
let min_specialization = tcx.features().min_specialization;
let module = tcx.hir_module_items(module_def_id);
for id in module.items() {
@@ -70,22 +70,21 @@ pub fn provide(providers: &mut Providers) {
fn enforce_impl_params_are_constrained(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) {
// Every lifetime used in an associated type must be constrained.
- let impl_self_ty = tcx.type_of(impl_def_id).subst_identity();
+ let impl_self_ty = tcx.type_of(impl_def_id).instantiate_identity();
if impl_self_ty.references_error() {
// Don't complain about unconstrained type params when self ty isn't known due to errors.
// (#36836)
tcx.sess.delay_span_bug(
tcx.def_span(impl_def_id),
format!(
- "potentially unconstrained type parameters weren't evaluated: {:?}",
- impl_self_ty,
+ "potentially unconstrained type parameters weren't evaluated: {impl_self_ty:?}",
),
);
return;
}
let impl_generics = tcx.generics_of(impl_def_id);
let impl_predicates = tcx.predicates_of(impl_def_id);
- let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).map(ty::EarlyBinder::subst_identity);
+ let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).map(ty::EarlyBinder::instantiate_identity);
let mut input_parameters = cgp::parameters_for_impl(impl_self_ty, impl_trait_ref);
cgp::identify_constrained_generic_params(
@@ -104,25 +103,12 @@ fn enforce_impl_params_are_constrained(tcx: TyCtxt<'_>, impl_def_id: LocalDefId)
match item.kind {
ty::AssocKind::Type => {
if item.defaultness(tcx).has_value() {
- cgp::parameters_for(&tcx.type_of(def_id).subst_identity(), true)
+ cgp::parameters_for(&tcx.type_of(def_id).instantiate_identity(), true)
} else {
vec![]
}
}
- ty::AssocKind::Fn => {
- if !tcx.lower_impl_trait_in_trait_to_assoc_ty()
- && item.defaultness(tcx).has_value()
- && tcx.impl_method_has_trait_impl_trait_tys(item.def_id)
- && let Ok(table) = tcx.collect_return_position_impl_trait_in_trait_tys(def_id)
- {
- table.values().copied().flat_map(|ty| {
- cgp::parameters_for(&ty.subst_identity(), true)
- }).collect()
- } else {
- vec![]
- }
- }
- ty::AssocKind::Const => vec![],
+ ty::AssocKind::Fn | ty::AssocKind::Const => vec![],
}
})
.collect();
@@ -193,7 +179,7 @@ fn report_unused_parameter(tcx: TyCtxt<'_>, span: Span, kind: &str, name: Symbol
kind,
name
);
- err.span_label(span, format!("unconstrained {} parameter", kind));
+ err.span_label(span, format!("unconstrained {kind} parameter"));
if kind == "const" {
err.note(
"expressions using a const parameter must map each value to a distinct output value",
diff --git a/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs b/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
index c64fb469b..3760195a5 100644
--- a/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
+++ b/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
@@ -14,15 +14,15 @@
//! To enforce this requirement on specializations we take the following
//! approach:
//!
-//! 1. Match up the substs for `impl2` so that the implemented trait and
+//! 1. Match up the args for `impl2` so that the implemented trait and
//! self-type match those for `impl1`.
-//! 2. Check for any direct use of `'static` in the substs of `impl2`.
+//! 2. Check for any direct use of `'static` in the args of `impl2`.
//! 3. Check that all of the generic parameters of `impl1` occur at most once
-//! in the *unconstrained* substs for `impl2`. A parameter is constrained if
+//! in the *unconstrained* args for `impl2`. A parameter is constrained if
//! its value is completely determined by an associated type projection
//! predicate.
//! 4. Check that all predicates on `impl1` either exist on `impl2` (after
-//! matching substs), or are well-formed predicates for the trait's type
+//! matching args), or are well-formed predicates for the trait's type
//! arguments.
//!
//! ## Example
@@ -74,13 +74,13 @@ use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_infer::infer::outlives::env::OutlivesEnvironment;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::specialization_graph::Node;
-use rustc_middle::ty::subst::{GenericArg, InternalSubsts, SubstsRef};
use rustc_middle::ty::trait_def::TraitSpecializationKind;
use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{GenericArg, GenericArgs, GenericArgsRef};
use rustc_span::{ErrorGuaranteed, Span};
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
-use rustc_trait_selection::traits::{self, translate_substs_with_cause, wf, ObligationCtxt};
+use rustc_trait_selection::traits::{self, translate_args_with_cause, wf, ObligationCtxt};
pub(super) fn check_min_specialization(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) {
if let Some(node) = parent_specialization_node(tcx, impl_def_id) {
@@ -113,20 +113,20 @@ fn check_always_applicable(tcx: TyCtxt<'_>, impl1_def_id: LocalDefId, impl2_node
let span = tcx.def_span(impl1_def_id);
check_has_items(tcx, impl1_def_id, impl2_node, span);
- if let Ok((impl1_substs, impl2_substs)) = get_impl_substs(tcx, impl1_def_id, impl2_node) {
+ if let Ok((impl1_args, impl2_args)) = get_impl_args(tcx, impl1_def_id, impl2_node) {
let impl2_def_id = impl2_node.def_id();
- debug!(?impl2_def_id, ?impl2_substs);
+ debug!(?impl2_def_id, ?impl2_args);
- let parent_substs = if impl2_node.is_from_trait() {
- impl2_substs.to_vec()
+ let parent_args = if impl2_node.is_from_trait() {
+ impl2_args.to_vec()
} else {
- unconstrained_parent_impl_substs(tcx, impl2_def_id, impl2_substs)
+ unconstrained_parent_impl_args(tcx, impl2_def_id, impl2_args)
};
check_constness(tcx, impl1_def_id, impl2_node, span);
- check_static_lifetimes(tcx, &parent_substs, span);
- check_duplicate_params(tcx, impl1_substs, &parent_substs, span);
- check_predicates(tcx, impl1_def_id, impl1_substs, impl2_node, impl2_substs, span);
+ check_static_lifetimes(tcx, &parent_args, span);
+ check_duplicate_params(tcx, impl1_args, &parent_args, span);
+ check_predicates(tcx, impl1_def_id, impl1_args, impl2_node, impl2_args, span);
}
}
@@ -167,23 +167,23 @@ fn check_constness(tcx: TyCtxt<'_>, impl1_def_id: LocalDefId, impl2_node: Node,
/// ```
///
/// Would return `S1 = [C]` and `S2 = [Vec<C>, C]`.
-fn get_impl_substs(
+fn get_impl_args(
tcx: TyCtxt<'_>,
impl1_def_id: LocalDefId,
impl2_node: Node,
-) -> Result<(SubstsRef<'_>, SubstsRef<'_>), ErrorGuaranteed> {
+) -> Result<(GenericArgsRef<'_>, GenericArgsRef<'_>), ErrorGuaranteed> {
let infcx = &tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new(infcx);
let param_env = tcx.param_env(impl1_def_id);
let impl1_span = tcx.def_span(impl1_def_id);
let assumed_wf_types = ocx.assumed_wf_types_and_report_errors(param_env, impl1_def_id)?;
- let impl1_substs = InternalSubsts::identity_for_item(tcx, impl1_def_id);
- let impl2_substs = translate_substs_with_cause(
+ let impl1_args = GenericArgs::identity_for_item(tcx, impl1_def_id);
+ let impl2_args = translate_args_with_cause(
infcx,
param_env,
impl1_def_id.to_def_id(),
- impl1_substs,
+ impl1_args,
impl2_node,
|_, span| {
traits::ObligationCause::new(
@@ -203,12 +203,12 @@ fn get_impl_substs(
let implied_bounds = infcx.implied_bounds_tys(param_env, impl1_def_id, assumed_wf_types);
let outlives_env = OutlivesEnvironment::with_bounds(param_env, implied_bounds);
let _ = ocx.resolve_regions_and_report_errors(impl1_def_id, &outlives_env);
- let Ok(impl2_substs) = infcx.fully_resolve(impl2_substs) else {
+ let Ok(impl2_args) = infcx.fully_resolve(impl2_args) else {
let span = tcx.def_span(impl1_def_id);
let guar = tcx.sess.emit_err(SubstsOnOverriddenImpl { span });
return Err(guar);
};
- Ok((impl1_substs, impl2_substs))
+ Ok((impl1_args, impl2_args))
}
/// Returns a list of all of the unconstrained subst of the given impl.
@@ -217,17 +217,17 @@ fn get_impl_substs(
///
/// impl<'a, T, I> ... where &'a I: IntoIterator<Item=&'a T>
///
-/// This would return the substs corresponding to `['a, I]`, because knowing
+/// This would return the args corresponding to `['a, I]`, because knowing
/// `'a` and `I` determines the value of `T`.
-fn unconstrained_parent_impl_substs<'tcx>(
+fn unconstrained_parent_impl_args<'tcx>(
tcx: TyCtxt<'tcx>,
impl_def_id: DefId,
- impl_substs: SubstsRef<'tcx>,
+ impl_args: GenericArgsRef<'tcx>,
) -> Vec<GenericArg<'tcx>> {
let impl_generic_predicates = tcx.predicates_of(impl_def_id);
let mut unconstrained_parameters = FxHashSet::default();
let mut constrained_params = FxHashSet::default();
- let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).map(ty::EarlyBinder::subst_identity);
+ let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).map(ty::EarlyBinder::instantiate_identity);
// Unfortunately the functions in `constrained_generic_parameters` don't do
// what we want here. We want only a list of constrained parameters while
@@ -255,7 +255,7 @@ fn unconstrained_parent_impl_substs<'tcx>(
}
}
- impl_substs
+ impl_args
.iter()
.enumerate()
.filter(|&(idx, _)| !constrained_params.contains(&(idx as u32)))
@@ -264,7 +264,7 @@ fn unconstrained_parent_impl_substs<'tcx>(
}
/// Check that parameters of the derived impl don't occur more than once in the
-/// equated substs of the base impl.
+/// equated args of the base impl.
///
/// For example forbid the following:
///
@@ -280,21 +280,21 @@ fn unconstrained_parent_impl_substs<'tcx>(
/// impl<T> Tr<T> for Vec<T> { }
/// ```
///
-/// The substs for the parent impl here are `[T, Vec<T>]`, which repeats `T`,
-/// but `S` is constrained in the parent impl, so `parent_substs` is only
+/// The args for the parent impl here are `[T, Vec<T>]`, which repeats `T`,
+/// but `S` is constrained in the parent impl, so `parent_args` is only
/// `[Vec<T>]`. This means we allow this impl.
fn check_duplicate_params<'tcx>(
tcx: TyCtxt<'tcx>,
- impl1_substs: SubstsRef<'tcx>,
- parent_substs: &Vec<GenericArg<'tcx>>,
+ impl1_args: GenericArgsRef<'tcx>,
+ parent_args: &Vec<GenericArg<'tcx>>,
span: Span,
) {
- let mut base_params = cgp::parameters_for(parent_substs, true);
+ let mut base_params = cgp::parameters_for(parent_args, true);
base_params.sort_by_key(|param| param.0);
if let (_, [duplicate, ..]) = base_params.partition_dedup() {
- let param = impl1_substs[duplicate.0 as usize];
+ let param = impl1_args[duplicate.0 as usize];
tcx.sess
- .struct_span_err(span, format!("specializing impl repeats parameter `{}`", param))
+ .struct_span_err(span, format!("specializing impl repeats parameter `{param}`"))
.emit();
}
}
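`check_duplicate_params` sorts the flattened parameters and looks for an adjacent repeat via `partition_dedup`. The same idea on plain indices, as a self-contained sketch (the `u32` encoding is a simplification, not the real `cgp::Parameter` type):

```rust
// Sort, then any duplicate must show up as an adjacent equal pair.
fn first_duplicate(mut params: Vec<u32>) -> Option<u32> {
    params.sort_unstable();
    params.windows(2).find(|w| w[0] == w[1]).map(|w| w[0])
}

fn main() {
    // Parent args mentioning the same impl parameter twice: rejected.
    assert_eq!(first_duplicate(vec![0, 1, 0]), Some(0));
    // Each parameter at most once: allowed.
    assert_eq!(first_duplicate(vec![0, 1]), None);
}
```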
@@ -309,10 +309,10 @@ fn check_duplicate_params<'tcx>(
/// ```
fn check_static_lifetimes<'tcx>(
tcx: TyCtxt<'tcx>,
- parent_substs: &Vec<GenericArg<'tcx>>,
+ parent_args: &Vec<GenericArg<'tcx>>,
span: Span,
) {
- if tcx.any_free_region_meets(parent_substs, |r| r.is_static()) {
+ if tcx.any_free_region_meets(parent_args, |r| r.is_static()) {
tcx.sess.emit_err(errors::StaticSpecialize { span });
}
}
@@ -331,14 +331,14 @@ fn check_static_lifetimes<'tcx>(
fn check_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
impl1_def_id: LocalDefId,
- impl1_substs: SubstsRef<'tcx>,
+ impl1_args: GenericArgsRef<'tcx>,
impl2_node: Node,
- impl2_substs: SubstsRef<'tcx>,
+ impl2_args: GenericArgsRef<'tcx>,
span: Span,
) {
let impl1_predicates: Vec<_> = traits::elaborate(
tcx,
- tcx.predicates_of(impl1_def_id).instantiate(tcx, impl1_substs).into_iter(),
+ tcx.predicates_of(impl1_def_id).instantiate(tcx, impl1_args).into_iter(),
)
.collect();
@@ -350,7 +350,7 @@ fn check_predicates<'tcx>(
traits::elaborate(
tcx,
tcx.predicates_of(impl2_node.def_id())
- .instantiate(tcx, impl2_substs)
+ .instantiate(tcx, impl2_args)
.into_iter()
.map(|(c, _s)| c.as_predicate()),
)
@@ -385,7 +385,7 @@ fn check_predicates<'tcx>(
.map(|(c, _span)| c.as_predicate());
// Include the well-formed predicates of the type parameters of the impl.
- for arg in tcx.impl_trait_ref(impl1_def_id).unwrap().subst_identity().substs {
+ for arg in tcx.impl_trait_ref(impl1_def_id).unwrap().instantiate_identity().args {
let infcx = &tcx.infer_ctxt().build();
let obligations =
wf::obligations(infcx, tcx.param_env(impl1_def_id), impl1_def_id, 0, arg, span)
@@ -431,45 +431,13 @@ fn check_predicates<'tcx>(
///
/// So we make that check in this function and try to raise a helpful error message.
fn trait_predicates_eq<'tcx>(
- tcx: TyCtxt<'tcx>,
+ _tcx: TyCtxt<'tcx>,
predicate1: ty::Predicate<'tcx>,
predicate2: ty::Predicate<'tcx>,
- span: Span,
+ _span: Span,
) -> bool {
- let pred1_kind = predicate1.kind().skip_binder();
- let pred2_kind = predicate2.kind().skip_binder();
- let (trait_pred1, trait_pred2) = match (pred1_kind, pred2_kind) {
- (
- ty::PredicateKind::Clause(ty::ClauseKind::Trait(pred1)),
- ty::PredicateKind::Clause(ty::ClauseKind::Trait(pred2)),
- ) => (pred1, pred2),
- // Just use plain syntactic equivalence if either of the predicates aren't
- // trait predicates or have bound vars.
- _ => return predicate1 == predicate2,
- };
-
- let predicates_equal_modulo_constness = {
- let pred1_unconsted =
- ty::TraitPredicate { constness: ty::BoundConstness::NotConst, ..trait_pred1 };
- let pred2_unconsted =
- ty::TraitPredicate { constness: ty::BoundConstness::NotConst, ..trait_pred2 };
- pred1_unconsted == pred2_unconsted
- };
-
- if !predicates_equal_modulo_constness {
- return false;
- }
-
- // Check that the predicate on the specializing impl is at least as const as
- // the one on the base.
- match (trait_pred2.constness, trait_pred1.constness) {
- (ty::BoundConstness::ConstIfConst, ty::BoundConstness::NotConst) => {
- tcx.sess.emit_err(errors::MissingTildeConst { span });
- }
- _ => {}
- }
-
- true
+ // FIXME(effects)
+ predicate1 == predicate2
}
#[instrument(level = "debug", skip(tcx))]
@@ -482,7 +450,6 @@ fn check_specialization_on<'tcx>(tcx: TyCtxt<'tcx>, predicate: ty::Predicate<'tc
// items.
ty::PredicateKind::Clause(ty::ClauseKind::Trait(ty::TraitPredicate {
trait_ref,
- constness: _,
polarity: _,
})) => {
if !matches!(
@@ -523,7 +490,7 @@ fn check_specialization_on<'tcx>(tcx: TyCtxt<'tcx>, predicate: ty::Predicate<'tc
}
_ => {
tcx.sess
- .struct_span_err(span, format!("cannot specialize on predicate `{}`", predicate))
+ .struct_span_err(span, format!("cannot specialize on predicate `{predicate}`"))
.emit();
}
}
@@ -536,7 +503,6 @@ fn trait_predicate_kind<'tcx>(
match predicate.kind().skip_binder() {
ty::PredicateKind::Clause(ty::ClauseKind::Trait(ty::TraitPredicate {
trait_ref,
- constness: _,
polarity: _,
})) => Some(tcx.trait_def(trait_ref.def_id).specialization_kind),
ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(_))
diff --git a/compiler/rustc_hir_analysis/src/lib.rs b/compiler/rustc_hir_analysis/src/lib.rs
index a68832d96..4f95174f8 100644
--- a/compiler/rustc_hir_analysis/src/lib.rs
+++ b/compiler/rustc_hir_analysis/src/lib.rs
@@ -99,23 +99,20 @@ use rustc_errors::ErrorGuaranteed;
use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
use rustc_hir as hir;
-use rustc_hir::Node;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::middle;
use rustc_middle::query::Providers;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::util;
-use rustc_session::{config::EntryFnType, parse::feature_err};
-use rustc_span::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_session::parse::feature_err;
use rustc_span::{symbol::sym, Span, DUMMY_SP};
use rustc_target::spec::abi::Abi;
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
-use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode, ObligationCtxt};
-
-use std::ops::Not;
+use rustc_trait_selection::traits::{self, ObligationCause, ObligationCtxt};
use astconv::{AstConv, OnlySelfBounds};
use bounds::Bounds;
+use rustc_hir::def::DefKind;
fluent_messages! { "../messages.ftl" }
@@ -176,271 +173,11 @@ fn require_same_types<'tcx>(
}
}
-fn check_main_fn_ty(tcx: TyCtxt<'_>, main_def_id: DefId) {
- let main_fnsig = tcx.fn_sig(main_def_id).subst_identity();
- let main_span = tcx.def_span(main_def_id);
-
- fn main_fn_diagnostics_def_id(tcx: TyCtxt<'_>, def_id: DefId, sp: Span) -> LocalDefId {
- if let Some(local_def_id) = def_id.as_local() {
- let hir_type = tcx.type_of(local_def_id).subst_identity();
- if !matches!(hir_type.kind(), ty::FnDef(..)) {
- span_bug!(sp, "main has a non-function type: found `{}`", hir_type);
- }
- local_def_id
- } else {
- CRATE_DEF_ID
- }
- }
-
- fn main_fn_generics_params_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
- if !def_id.is_local() {
- return None;
- }
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- match tcx.hir().find(hir_id) {
- Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, generics, _), .. })) => {
- generics.params.is_empty().not().then_some(generics.span)
- }
- _ => {
- span_bug!(tcx.def_span(def_id), "main has a non-function type");
- }
- }
- }
-
- fn main_fn_where_clauses_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
- if !def_id.is_local() {
- return None;
- }
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- match tcx.hir().find(hir_id) {
- Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, generics, _), .. })) => {
- Some(generics.where_clause_span)
- }
- _ => {
- span_bug!(tcx.def_span(def_id), "main has a non-function type");
- }
- }
- }
-
- fn main_fn_asyncness_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
- if !def_id.is_local() {
- return None;
- }
- Some(tcx.def_span(def_id))
- }
-
- fn main_fn_return_type_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
- if !def_id.is_local() {
- return None;
- }
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- match tcx.hir().find(hir_id) {
- Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(fn_sig, _, _), .. })) => {
- Some(fn_sig.decl.output.span())
- }
- _ => {
- span_bug!(tcx.def_span(def_id), "main has a non-function type");
- }
- }
- }
-
- let mut error = false;
- let main_diagnostics_def_id = main_fn_diagnostics_def_id(tcx, main_def_id, main_span);
- let main_fn_generics = tcx.generics_of(main_def_id);
- let main_fn_predicates = tcx.predicates_of(main_def_id);
- if main_fn_generics.count() != 0 || !main_fnsig.bound_vars().is_empty() {
- let generics_param_span = main_fn_generics_params_span(tcx, main_def_id);
- tcx.sess.emit_err(errors::MainFunctionGenericParameters {
- span: generics_param_span.unwrap_or(main_span),
- label_span: generics_param_span,
- });
- error = true;
- } else if !main_fn_predicates.predicates.is_empty() {
- // generics may bring in implicit predicates, so we skip this check if generics is present.
- let generics_where_clauses_span = main_fn_where_clauses_span(tcx, main_def_id);
- tcx.sess.emit_err(errors::WhereClauseOnMain {
- span: generics_where_clauses_span.unwrap_or(main_span),
- generics_span: generics_where_clauses_span,
- });
- error = true;
- }
-
- let main_asyncness = tcx.asyncness(main_def_id);
- if let hir::IsAsync::Async = main_asyncness {
- let asyncness_span = main_fn_asyncness_span(tcx, main_def_id);
- tcx.sess.emit_err(errors::MainFunctionAsync { span: main_span, asyncness: asyncness_span });
- error = true;
- }
-
- for attr in tcx.get_attrs(main_def_id, sym::track_caller) {
- tcx.sess.emit_err(errors::TrackCallerOnMain { span: attr.span, annotated: main_span });
- error = true;
- }
-
- if !tcx.codegen_fn_attrs(main_def_id).target_features.is_empty()
- // Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988
- && !tcx.sess.target.is_like_wasm
- && !tcx.sess.opts.actually_rustdoc
- {
- tcx.sess.emit_err(errors::TargetFeatureOnMain { main: main_span });
- error = true;
- }
-
- if error {
- return;
- }
-
- // Main should have no WC, so empty param env is OK here.
- let param_env = ty::ParamEnv::empty();
- let expected_return_type;
- if let Some(term_did) = tcx.lang_items().termination() {
- let return_ty = main_fnsig.output();
- let return_ty_span = main_fn_return_type_span(tcx, main_def_id).unwrap_or(main_span);
- if !return_ty.bound_vars().is_empty() {
- tcx.sess.emit_err(errors::MainFunctionReturnTypeGeneric { span: return_ty_span });
- error = true;
- }
- let return_ty = return_ty.skip_binder();
- let infcx = tcx.infer_ctxt().build();
- let cause = traits::ObligationCause::new(
- return_ty_span,
- main_diagnostics_def_id,
- ObligationCauseCode::MainFunctionType,
- );
- let ocx = traits::ObligationCtxt::new(&infcx);
- let norm_return_ty = ocx.normalize(&cause, param_env, return_ty);
- ocx.register_bound(cause, param_env, norm_return_ty, term_did);
- let errors = ocx.select_all_or_error();
- if !errors.is_empty() {
- infcx.err_ctxt().report_fulfillment_errors(&errors);
- error = true;
- }
- // now we can take the return type of the given main function
- expected_return_type = main_fnsig.output();
- } else {
- // standard () main return type
- expected_return_type = ty::Binder::dummy(Ty::new_unit(tcx));
- }
-
- if error {
- return;
- }
-
- let se_ty = Ty::new_fn_ptr(
- tcx,
- expected_return_type.map_bound(|expected_return_type| {
- tcx.mk_fn_sig([], expected_return_type, false, hir::Unsafety::Normal, Abi::Rust)
- }),
- );
-
- require_same_types(
- tcx,
- &ObligationCause::new(
- main_span,
- main_diagnostics_def_id,
- ObligationCauseCode::MainFunctionType,
- ),
- param_env,
- se_ty,
- Ty::new_fn_ptr(tcx, main_fnsig),
- );
-}
-fn check_start_fn_ty(tcx: TyCtxt<'_>, start_def_id: DefId) {
- let start_def_id = start_def_id.expect_local();
- let start_id = tcx.hir().local_def_id_to_hir_id(start_def_id);
- let start_span = tcx.def_span(start_def_id);
- let start_t = tcx.type_of(start_def_id).subst_identity();
- match start_t.kind() {
- ty::FnDef(..) => {
- if let Some(Node::Item(it)) = tcx.hir().find(start_id) {
- if let hir::ItemKind::Fn(sig, generics, _) = &it.kind {
- let mut error = false;
- if !generics.params.is_empty() {
- tcx.sess.emit_err(errors::StartFunctionParameters { span: generics.span });
- error = true;
- }
- if generics.has_where_clause_predicates {
- tcx.sess.emit_err(errors::StartFunctionWhere {
- span: generics.where_clause_span,
- });
- error = true;
- }
- if let hir::IsAsync::Async = sig.header.asyncness {
- let span = tcx.def_span(it.owner_id);
- tcx.sess.emit_err(errors::StartAsync { span: span });
- error = true;
- }
-
- let attrs = tcx.hir().attrs(start_id);
- for attr in attrs {
- if attr.has_name(sym::track_caller) {
- tcx.sess.emit_err(errors::StartTrackCaller {
- span: attr.span,
- start: start_span,
- });
- error = true;
- }
- if attr.has_name(sym::target_feature)
- // Calling functions with `#[target_feature]` is
- // not unsafe on WASM, see #84988
- && !tcx.sess.target.is_like_wasm
- && !tcx.sess.opts.actually_rustdoc
- {
- tcx.sess.emit_err(errors::StartTargetFeature {
- span: attr.span,
- start: start_span,
- });
- error = true;
- }
- }
-
- if error {
- return;
- }
- }
- }
-
- let se_ty = Ty::new_fn_ptr(
- tcx,
- ty::Binder::dummy(tcx.mk_fn_sig(
- [tcx.types.isize, Ty::new_imm_ptr(tcx, Ty::new_imm_ptr(tcx, tcx.types.u8))],
- tcx.types.isize,
- false,
- hir::Unsafety::Normal,
- Abi::Rust,
- )),
- );
-
- require_same_types(
- tcx,
- &ObligationCause::new(
- start_span,
- start_def_id,
- ObligationCauseCode::StartFunctionType,
- ),
- ty::ParamEnv::empty(), // start should not have any where bounds.
- se_ty,
- Ty::new_fn_ptr(tcx, tcx.fn_sig(start_def_id).subst_identity()),
- );
- }
- _ => {
- span_bug!(start_span, "start has a non-function type: found `{}`", start_t);
- }
- }
-}
-
-fn check_for_entry_fn(tcx: TyCtxt<'_>) {
- match tcx.entry_fn(()) {
- Some((def_id, EntryFnType::Main { .. })) => check_main_fn_ty(tcx, def_id),
- Some((def_id, EntryFnType::Start)) => check_start_fn_ty(tcx, def_id),
- _ => {}
- }
-}
-
pub fn provide(providers: &mut Providers) {
collect::provide(providers);
coherence::provide(providers);
check::provide(providers);
+ check_unused::provide(providers);
variance::provide(providers);
outlives::provide(providers);
impl_wf_check::provide(providers);
@@ -500,8 +237,18 @@ pub fn check_crate(tcx: TyCtxt<'_>) -> Result<(), ErrorGuaranteed> {
tcx.hir().for_each_module(|module| tcx.ensure().check_mod_item_types(module))
});
- check_unused::check_crate(tcx);
- check_for_entry_fn(tcx);
+ // FIXME: Remove this when we implement creating `DefId`s
+ // for anon constants during their parents' typeck.
+ // Typechecking all body owners in parallel will produce query
+ // cycle errors because it may typeck anon constants directly.
+ tcx.hir().par_body_owners(|item_def_id| {
+ let def_kind = tcx.def_kind(item_def_id);
+ if !matches!(def_kind, DefKind::AnonConst) {
+ tcx.ensure().typeck(item_def_id);
+ }
+ });
+
+ tcx.ensure().check_unused_traits(());
if let Some(reported) = tcx.sess.has_errors() { Err(reported) } else { Ok(()) }
}
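For what the FIXME above is guarding against (example is mine, not from the diff): anonymous constants such as array lengths are body owners whose checking is entangled with their enclosing item, so typeck'ing them directly from this loop can cycle, which is why the `DefKind::AnonConst` filter is there.

```rust
// The `3` below is an anonymous const nested in `parent`'s signature/body;
// its checking is driven by the enclosing item rather than queried stand-alone.
fn parent() -> [u8; 3] {
    [0u8; 3]
}

fn main() {
    assert_eq!(parent().len(), 3);
}
```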
diff --git a/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs b/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs
index 71dca918f..c17925471 100644
--- a/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs
+++ b/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs
@@ -46,7 +46,7 @@ pub(super) fn infer_predicates(
// For field of type &'a T (reference) or Adt
// (struct/enum/union) there will be outlive
// requirements for adt_def.
- let field_ty = tcx.type_of(field_def.did).subst_identity();
+ let field_ty = tcx.type_of(field_def.did).instantiate_identity();
let field_span = tcx.def_span(field_def.did);
insert_required_predicates_to_be_wf(
tcx,
@@ -117,7 +117,7 @@ fn insert_required_predicates_to_be_wf<'tcx>(
// can load the current set of inferred and explicit
// predicates from `global_inferred_outlives` and filter the
// ones that are TypeOutlives.
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
// First check the inferred predicates
//
// Example 1:
@@ -146,7 +146,7 @@ fn insert_required_predicates_to_be_wf<'tcx>(
// get `T: 'a` (or `predicate`):
let predicate = unsubstituted_predicates
.rebind(*unsubstituted_predicate)
- .subst(tcx, substs);
+ .instantiate(tcx, args);
insert_outlives_predicate(
tcx,
predicate.0,
@@ -159,11 +159,11 @@ fn insert_required_predicates_to_be_wf<'tcx>(
// Check if the type has any explicit predicates that need
// to be added to `required_predicates`
- // let _: () = substs.region_at(0);
+ // let _: () = args.region_at(0);
check_explicit_predicates(
tcx,
def.did(),
- substs,
+ args,
required_predicates,
explicit_map,
None,
@@ -186,12 +186,11 @@ fn insert_required_predicates_to_be_wf<'tcx>(
// predicates in `check_explicit_predicates` we
// need to ignore checking the explicit_map for
// Self type.
- let substs =
- ex_trait_ref.with_self_ty(tcx, tcx.types.usize).skip_binder().substs;
+ let args = ex_trait_ref.with_self_ty(tcx, tcx.types.usize).skip_binder().args;
check_explicit_predicates(
tcx,
ex_trait_ref.skip_binder().def_id,
- substs,
+ args,
required_predicates,
explicit_map,
Some(tcx.types.self_param),
@@ -206,7 +205,7 @@ fn insert_required_predicates_to_be_wf<'tcx>(
check_explicit_predicates(
tcx,
tcx.parent(obj.def_id),
- obj.substs,
+ obj.args,
required_predicates,
explicit_map,
None,
@@ -239,18 +238,18 @@ fn insert_required_predicates_to_be_wf<'tcx>(
fn check_explicit_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- substs: &[GenericArg<'tcx>],
+ args: &[GenericArg<'tcx>],
required_predicates: &mut RequiredPredicates<'tcx>,
explicit_map: &mut ExplicitPredicatesMap<'tcx>,
ignored_self_ty: Option<Ty<'tcx>>,
) {
debug!(
"check_explicit_predicates(def_id={:?}, \
- substs={:?}, \
+ args={:?}, \
explicit_map={:?}, \
required_predicates={:?}, \
ignored_self_ty={:?})",
- def_id, substs, explicit_map, required_predicates, ignored_self_ty,
+ def_id, args, explicit_map, required_predicates, ignored_self_ty,
);
let explicit_predicates = explicit_map.explicit_predicates_of(tcx, def_id);
@@ -278,10 +277,10 @@ fn check_explicit_predicates<'tcx>(
// that is represented by the `dyn Trait`, not to the `X` type parameter
// (or any other generic parameter) declared on `MyStruct`.
//
- // Note that we do this check for self **before** applying `substs`. In the
- // case that `substs` come from a `dyn Trait` type, our caller will have
+ // Note that we do this check for self **before** applying `args`. In the
+ // case that `args` come from a `dyn Trait` type, our caller will have
// included `Self = usize` as the value for `Self`. If we were
- // to apply the substs, and not filter this predicate, we might then falsely
+ // to apply the args, and not filter this predicate, we might then falsely
// conclude that e.g., `X: 'x` was a reasonable inferred requirement.
//
// Another similar case is where we have an inferred
@@ -299,7 +298,7 @@ fn check_explicit_predicates<'tcx>(
continue;
}
- let predicate = explicit_predicates.rebind(*outlives_predicate).subst(tcx, substs);
+ let predicate = explicit_predicates.rebind(*outlives_predicate).instantiate(tcx, args);
debug!("predicate = {:?}", &predicate);
insert_outlives_predicate(tcx, predicate.0, predicate.1, span, required_predicates);
}
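The renames in this file are mechanical, but the surrounding code is the implied-outlives inference for ADT fields. A minimal reminder of what gets inferred (standard example, not specific to this diff):

```rust
// The compiler infers `T: 'a` for `Holder` because the field type `&'a T`
// must be well-formed; users never write that predicate explicitly.
struct Holder<'a, T> {
    field: &'a T,
}

// The inferred predicate propagates: `Nested` also gets `T: 'a`
// because it embeds `Holder<'a, T>`.
struct Nested<'a, T> {
    inner: Holder<'a, T>,
}

fn main() {
    let n = 5u32;
    let nested = Nested { inner: Holder { field: &n } };
    println!("{}", nested.inner.field);
}
```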
diff --git a/compiler/rustc_hir_analysis/src/outlives/mod.rs b/compiler/rustc_hir_analysis/src/outlives/mod.rs
index 48624cefe..be9d076bd 100644
--- a/compiler/rustc_hir_analysis/src/outlives/mod.rs
+++ b/compiler/rustc_hir_analysis/src/outlives/mod.rs
@@ -2,7 +2,7 @@ use hir::Node;
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_middle::query::Providers;
-use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::GenericArgKind;
use rustc_middle::ty::{self, CratePredicatesMap, ToPredicate, TyCtxt};
use rustc_span::symbol::sym;
use rustc_span::Span;
diff --git a/compiler/rustc_hir_analysis/src/outlives/utils.rs b/compiler/rustc_hir_analysis/src/outlives/utils.rs
index c5c5f63a1..a6410c944 100644
--- a/compiler/rustc_hir_analysis/src/outlives/utils.rs
+++ b/compiler/rustc_hir_analysis/src/outlives/utils.rs
@@ -1,6 +1,6 @@
use rustc_infer::infer::outlives::components::{push_outlives_components, Component};
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
use rustc_middle::ty::{self, Region, Ty, TyCtxt};
+use rustc_middle::ty::{GenericArg, GenericArgKind};
use rustc_span::Span;
use smallvec::smallvec;
use std::collections::BTreeMap;
diff --git a/compiler/rustc_hir_analysis/src/structured_errors/wrong_number_of_generic_args.rs b/compiler/rustc_hir_analysis/src/structured_errors/wrong_number_of_generic_args.rs
index ee3457282..61b182b1b 100644
--- a/compiler/rustc_hir_analysis/src/structured_errors/wrong_number_of_generic_args.rs
+++ b/compiler/rustc_hir_analysis/src/structured_errors/wrong_number_of_generic_args.rs
@@ -360,9 +360,11 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
&[]
};
ret.extend(params.iter().filter_map(|p| {
- let hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Explicit }
- = p.kind
- else { return None };
+ let hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Explicit } =
+ p.kind
+ else {
+ return None;
+ };
let hir::ParamName::Plain(name) = p.name else { return None };
Some(name.to_string())
}));
@@ -472,7 +474,7 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
verb
)
} else {
- format!("missing generics for {} `{}`", def_kind, def_path)
+ format!("missing generics for {def_kind} `{def_path}`")
}
}
@@ -576,6 +578,9 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
MissingTypesOrConsts { .. } => {
self.suggest_adding_type_and_const_args(err);
}
+ ExcessTypesOrConsts { .. } => {
+ // This can happen with `~const T` where `T` isn't a `const_trait`.
+ }
_ => unreachable!(),
}
}
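
The new `ExcessTypesOrConsts` arm is only reached through the unstable effects machinery, so the shape of code it guards against is sketched here in comments rather than as a compilable example; the feature name and attribute are assumptions about the nightly toolchain, not part of this patch.

```rust
// Nightly-only sketch (would need `#![feature(const_trait_impl)]`):
//
//     trait NotConst {}                     // not marked `#[const_trait]`
//
//     const fn f<T: ~const NotConst>() {}   // `~const` on a non-const trait
//
// The `~const` bound supplies a host effect argument, but `NotConst` has no
// such parameter, so the mismatch surfaces as "excess" generic arguments,
// which this arm now swallows instead of hitting `unreachable!()`.
```
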
@@ -597,7 +602,7 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
let span = self.path_segment.ident.span;
// insert a suggestion of the form "Y<'a, 'b>"
- let sugg = format!("<{}>", suggested_args);
+ let sugg = format!("<{suggested_args}>");
debug!("sugg: {:?}", sugg);
err.span_suggestion_verbose(
@@ -622,7 +627,7 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
let sugg_suffix =
if is_first && (has_non_lt_args || has_bindings) { ", " } else { "" };
- let sugg = format!("{}{}{}", sugg_prefix, suggested_args, sugg_suffix);
+ let sugg = format!("{sugg_prefix}{suggested_args}{sugg_suffix}");
debug!("sugg: {:?}", sugg);
err.span_suggestion_verbose(sugg_span, msg, sugg, Applicability::HasPlaceholders);
@@ -647,7 +652,7 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
let span = self.path_segment.ident.span;
// insert a suggestion of the form "Y<T, U>"
- let sugg = format!("<{}>", suggested_args);
+ let sugg = format!("<{suggested_args}>");
debug!("sugg: {:?}", sugg);
err.span_suggestion_verbose(
@@ -680,7 +685,7 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
let sugg_suffix =
if is_first && !self.gen_args.bindings.is_empty() { ", " } else { "" };
- let sugg = format!("{}{}{}", sugg_prefix, suggested_args, sugg_suffix);
+ let sugg = format!("{sugg_prefix}{suggested_args}{sugg_suffix}");
debug!("sugg: {:?}", sugg);
err.span_suggestion_verbose(sugg_span, msg, sugg, Applicability::HasPlaceholders);
@@ -793,29 +798,36 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
num_trait_generics_except_self: usize,
) {
let sm = self.tcx.sess.source_map();
- let hir::ExprKind::MethodCall(_, rcvr, args, _) = expr.kind else { return; };
+ let hir::ExprKind::MethodCall(_, rcvr, args, _) = expr.kind else {
+ return;
+ };
if num_assoc_fn_excess_args != num_trait_generics_except_self {
return;
}
- let Some(gen_args) = self.gen_args.span_ext() else { return; };
- let Ok(generics) = sm.span_to_snippet(gen_args) else { return; };
- let Ok(rcvr) = sm.span_to_snippet(
- rcvr.span.find_ancestor_inside(expr.span).unwrap_or(rcvr.span)
- ) else { return; };
- let Ok(rest) =
- (match args {
- [] => Ok(String::new()),
- [arg] => sm.span_to_snippet(
- arg.span.find_ancestor_inside(expr.span).unwrap_or(arg.span),
- ),
- [first, .., last] => {
- let first_span =
- first.span.find_ancestor_inside(expr.span).unwrap_or(first.span);
- let last_span =
- last.span.find_ancestor_inside(expr.span).unwrap_or(last.span);
- sm.span_to_snippet(first_span.to(last_span))
- }
- }) else { return; };
+ let Some(gen_args) = self.gen_args.span_ext() else {
+ return;
+ };
+ let Ok(generics) = sm.span_to_snippet(gen_args) else {
+ return;
+ };
+ let Ok(rcvr) =
+ sm.span_to_snippet(rcvr.span.find_ancestor_inside(expr.span).unwrap_or(rcvr.span))
+ else {
+ return;
+ };
+ let Ok(rest) = (match args {
+ [] => Ok(String::new()),
+ [arg] => {
+ sm.span_to_snippet(arg.span.find_ancestor_inside(expr.span).unwrap_or(arg.span))
+ }
+ [first, .., last] => {
+ let first_span = first.span.find_ancestor_inside(expr.span).unwrap_or(first.span);
+ let last_span = last.span.find_ancestor_inside(expr.span).unwrap_or(last.span);
+ sm.span_to_snippet(first_span.to(last_span))
+ }
+ }) else {
+ return;
+ };
let comma = if args.len() > 0 { ", " } else { "" };
let trait_path = self.tcx.def_path_str(trait_def_id);
let method_name = self.tcx.item_name(self.def_id);
@@ -1015,7 +1027,7 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
.collect::<Vec<_>>()
.join(", ");
- format!(": {}", params)
+ format!(": {params}")
};
format!(
diff --git a/compiler/rustc_hir_analysis/src/variance/constraints.rs b/compiler/rustc_hir_analysis/src/variance/constraints.rs
index 6f0afae1b..8a40509d7 100644
--- a/compiler/rustc_hir_analysis/src/variance/constraints.rs
+++ b/compiler/rustc_hir_analysis/src/variance/constraints.rs
@@ -6,8 +6,8 @@
use hir::def_id::{DefId, LocalDefId};
use rustc_hir as hir;
use rustc_hir::def::DefKind;
-use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
use super::terms::VarianceTerm::*;
use super::terms::*;
@@ -78,6 +78,11 @@ pub fn add_constraints_from_crate<'a, 'tcx>(
}
}
DefKind::Fn | DefKind::AssocFn => constraint_cx.build_constraints_for_item(def_id),
+ DefKind::TyAlias { lazy }
+ if lazy || tcx.type_of(def_id).instantiate_identity().has_opaque_types() =>
+ {
+ constraint_cx.build_constraints_for_item(def_id)
+ }
_ => {}
}
}
@@ -101,7 +106,18 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
let inferred_start = self.terms_cx.inferred_starts[&def_id];
let current_item = &CurrentItem { inferred_start };
- match tcx.type_of(def_id).subst_identity().kind() {
+ let ty = tcx.type_of(def_id).instantiate_identity();
+
+ // The type returned by `type_of` is the underlying type, generally not a weak projection.
+ // Therefore we need to check the `DefKind` first.
+ if let DefKind::TyAlias { lazy } = tcx.def_kind(def_id)
+ && (lazy || ty.has_opaque_types())
+ {
+ self.add_constraints_from_ty(current_item, ty, self.covariant);
+ return;
+ }
+
+ match ty.kind() {
ty::Adt(def, _) => {
// Not entirely obvious: constraints on structs/enums do not
// affect the variance of their type parameters. See discussion
@@ -112,7 +128,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
for field in def.all_fields() {
self.add_constraints_from_ty(
current_item,
- tcx.type_of(field.did).subst_identity(),
+ tcx.type_of(field.did).instantiate_identity(),
self.covariant,
);
}
@@ -121,12 +137,13 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
ty::FnDef(..) => {
self.add_constraints_from_sig(
current_item,
- tcx.fn_sig(def_id).subst_identity(),
+ tcx.fn_sig(def_id).instantiate_identity(),
self.covariant,
);
}
ty::Error(_) => {}
+
_ => {
span_bug!(
tcx.def_span(def_id),
@@ -175,16 +192,16 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
}
#[instrument(level = "debug", skip(self, current))]
- fn add_constraints_from_invariant_substs(
+ fn add_constraints_from_invariant_args(
&mut self,
current: &CurrentItem,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
variance: VarianceTermPtr<'a>,
) {
// Traits are always invariant, so we can take advantage of that.
let variance_i = self.invariant(variance);
- for k in substs {
+ for k in args {
match k.unpack() {
GenericArgKind::Lifetime(lt) => {
self.add_constraints_from_region(current, lt, variance_i)
@@ -248,12 +265,16 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
}
}
- ty::Adt(def, substs) => {
- self.add_constraints_from_substs(current, def.did(), substs, variance);
+ ty::Adt(def, args) => {
+ self.add_constraints_from_args(current, def.did(), args, variance);
+ }
+
+ ty::Alias(ty::Projection | ty::Inherent | ty::Opaque, ref data) => {
+ self.add_constraints_from_invariant_args(current, data.args, variance);
}
- ty::Alias(_, ref data) => {
- self.add_constraints_from_invariant_substs(current, data.substs, variance);
+ ty::Alias(ty::Weak, ref data) => {
+ self.add_constraints_from_args(current, data.def_id, data.args, variance);
}
ty::Dynamic(data, r, _) => {
@@ -261,9 +282,9 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
self.add_constraints_from_region(current, r, variance);
if let Some(poly_trait_ref) = data.principal() {
- self.add_constraints_from_invariant_substs(
+ self.add_constraints_from_invariant_args(
current,
- poly_trait_ref.skip_binder().substs,
+ poly_trait_ref.skip_binder().args,
variance,
);
}
@@ -305,20 +326,20 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
/// Adds constraints appropriate for a nominal type (enum, struct,
/// object, etc) appearing in a context with ambient variance `variance`
- fn add_constraints_from_substs(
+ fn add_constraints_from_args(
&mut self,
current: &CurrentItem,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
variance: VarianceTermPtr<'a>,
) {
debug!(
- "add_constraints_from_substs(def_id={:?}, substs={:?}, variance={:?})",
- def_id, substs, variance
+ "add_constraints_from_args(def_id={:?}, args={:?}, variance={:?})",
+ def_id, args, variance
);
// We don't record `inferred_starts` entries for empty generics.
- if substs.is_empty() {
+ if args.is_empty() {
return;
}
@@ -328,7 +349,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
(None, Some(self.tcx().variances_of(def_id)))
};
- for (i, k) in substs.iter().enumerate() {
+ for (i, k) in args.iter().enumerate() {
let variance_decl = if let Some(InferredIndex(start)) = local {
// Parameter on an item defined within current crate:
// variance not yet inferred, so return a symbolic
@@ -341,7 +362,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
};
let variance_i = self.xform(variance, variance_decl);
debug!(
- "add_constraints_from_substs: variance_decl={:?} variance_i={:?}",
+ "add_constraints_from_args: variance_decl={:?} variance_i={:?}",
variance_decl, variance_i
);
match k.unpack() {
@@ -368,7 +389,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
match &c.kind() {
ty::ConstKind::Unevaluated(uv) => {
- self.add_constraints_from_invariant_substs(current, uv.substs, variance);
+ self.add_constraints_from_invariant_args(current, uv.args, variance);
}
_ => {}
}
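
Variance constraints are now also collected for type aliases: lazy (weak) aliases and aliases whose body contains opaque types. A rough nightly sketch of the first case, assuming the `lazy_type_alias` feature; the alias and function names are illustrative only:

```rust
#![feature(lazy_type_alias)]
#![allow(incomplete_features)]

// With lazy type aliases, `Rows<T>` is kept as a weak alias rather than being
// expanded eagerly, so the variance of `T` in `Rows<T>` must now be inferred
// by the constraint collection changed above (the `ty::Alias(ty::Weak, ..)` arm).
type Rows<T> = Vec<T>;

fn empty<T>() -> Rows<T> {
    Vec::new()
}
```
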
diff --git a/compiler/rustc_hir_analysis/src/variance/mod.rs b/compiler/rustc_hir_analysis/src/variance/mod.rs
index 066e74491..d91d9fcbc 100644
--- a/compiler/rustc_hir_analysis/src/variance/mod.rs
+++ b/compiler/rustc_hir_analysis/src/variance/mod.rs
@@ -7,8 +7,8 @@ use rustc_arena::DroplessArena;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::query::Providers;
-use rustc_middle::ty::{self, CrateVariancesMap, SubstsRef, Ty, TyCtxt};
-use rustc_middle::ty::{TypeSuperVisitable, TypeVisitable};
+use rustc_middle::ty::{self, CrateVariancesMap, GenericArgsRef, Ty, TyCtxt};
+use rustc_middle::ty::{TypeSuperVisitable, TypeVisitable, TypeVisitableExt};
use std::ops::ControlFlow;
/// Defines the `TermsContext` basically houses an arena where we can
@@ -56,7 +56,14 @@ fn variances_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Variance] {
let crate_map = tcx.crate_variances(());
return crate_map.variances.get(&item_def_id.to_def_id()).copied().unwrap_or(&[]);
}
- DefKind::OpaqueTy | DefKind::ImplTraitPlaceholder => {
+ DefKind::TyAlias { lazy }
+ if lazy || tcx.type_of(item_def_id).instantiate_identity().has_opaque_types() =>
+ {
+ // These are inferred.
+ let crate_map = tcx.crate_variances(());
+ return crate_map.variances.get(&item_def_id.to_def_id()).copied().unwrap_or(&[]);
+ }
+ DefKind::OpaqueTy => {
return variance_of_opaque(tcx, item_def_id);
}
_ => {}
@@ -83,17 +90,17 @@ fn variance_of_opaque(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Varianc
impl<'tcx> OpaqueTypeLifetimeCollector<'tcx> {
#[instrument(level = "trace", skip(self), ret)]
- fn visit_opaque(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> ControlFlow<!> {
+ fn visit_opaque(&mut self, def_id: DefId, args: GenericArgsRef<'tcx>) -> ControlFlow<!> {
if def_id != self.root_def_id && self.tcx.is_descendant_of(def_id, self.root_def_id) {
let child_variances = self.tcx.variances_of(def_id);
- for (a, v) in substs.iter().zip(child_variances) {
+ for (a, v) in args.iter().zip(child_variances) {
if *v != ty::Bivariant {
a.visit_with(self)?;
}
}
ControlFlow::Continue(())
} else {
- substs.visit_with(self)
+ args.visit_with(self)
}
}
}
@@ -110,18 +117,10 @@ fn variance_of_opaque(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Varianc
#[instrument(level = "trace", skip(self), ret)]
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
match t.kind() {
- ty::Alias(_, ty::AliasTy { def_id, substs, .. })
+ ty::Alias(_, ty::AliasTy { def_id, args, .. })
if matches!(self.tcx.def_kind(*def_id), DefKind::OpaqueTy) =>
{
- self.visit_opaque(*def_id, substs)
- }
- // FIXME(-Zlower-impl-trait-in-trait-to-assoc-ty) check whether this is necessary
- // at all for RPITITs.
- ty::Alias(_, ty::AliasTy { def_id, substs, .. })
- if self.tcx.is_impl_trait_in_trait(*def_id)
- && !self.tcx.lower_impl_trait_in_trait_to_assoc_ty() =>
- {
- self.visit_opaque(*def_id, substs)
+ self.visit_opaque(*def_id, args)
}
_ => t.super_visit_with(self),
}
@@ -152,30 +151,29 @@ fn variance_of_opaque(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Varianc
let mut collector =
OpaqueTypeLifetimeCollector { tcx, root_def_id: item_def_id.to_def_id(), variances };
- let id_substs = ty::InternalSubsts::identity_for_item(tcx, item_def_id);
- for (pred, _) in tcx.explicit_item_bounds(item_def_id).subst_iter_copied(tcx, id_substs) {
+ let id_args = ty::GenericArgs::identity_for_item(tcx, item_def_id);
+ for (pred, _) in tcx.explicit_item_bounds(item_def_id).iter_instantiated_copied(tcx, id_args) {
debug!(?pred);
- // We only ignore opaque type substs if the opaque type is the outermost type.
+ // We only ignore opaque type args if the opaque type is the outermost type.
// The opaque type may be nested within itself via recursion in e.g.
// type Foo<'a> = impl PartialEq<Foo<'a>>;
// which thus mentions `'a` and should thus accept hidden types that borrow 'a
// instead of requiring an additional `+ 'a`.
match pred.kind().skip_binder() {
ty::ClauseKind::Trait(ty::TraitPredicate {
- trait_ref: ty::TraitRef { def_id: _, substs, .. },
- constness: _,
+ trait_ref: ty::TraitRef { def_id: _, args, .. },
polarity: _,
}) => {
- for subst in &substs[1..] {
+ for subst in &args[1..] {
subst.visit_with(&mut collector);
}
}
ty::ClauseKind::Projection(ty::ProjectionPredicate {
- projection_ty: ty::AliasTy { substs, .. },
+ projection_ty: ty::AliasTy { args, .. },
term,
}) => {
- for subst in &substs[1..] {
+ for subst in &args[1..] {
subst.visit_with(&mut collector);
}
term.visit_with(&mut collector);
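
The recursive alias mentioned in the comment above can be spelled out; this is a nightly sketch assuming `type_alias_impl_trait`, with a made-up defining function:

```rust
#![feature(type_alias_impl_trait)]

// `Foo<'a>` mentions itself in its own bound, so `'a` is deliberately not
// ignored: hidden types are allowed to borrow `'a` without an extra `+ 'a`.
type Foo<'a> = impl PartialEq<Foo<'a>>;

// Defining use: the hidden type is `&'a u32`, which borrows `'a`.
fn define<'a>(x: &'a u32) -> Foo<'a> {
    x
}
```
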
diff --git a/compiler/rustc_hir_analysis/src/variance/solve.rs b/compiler/rustc_hir_analysis/src/variance/solve.rs
index c27c176e3..54da32770 100644
--- a/compiler/rustc_hir_analysis/src/variance/solve.rs
+++ b/compiler/rustc_hir_analysis/src/variance/solve.rs
@@ -103,7 +103,7 @@ impl<'a, 'tcx> SolveContext<'a, 'tcx> {
self.enforce_const_invariance(generics, variances);
// Functions are permitted to have unused generic parameters: make those invariant.
- if let ty::FnDef(..) = tcx.type_of(def_id).subst_identity().kind() {
+ if let ty::FnDef(..) = tcx.type_of(def_id).instantiate_identity().kind() {
for variance in variances.iter_mut() {
if *variance == ty::Bivariant {
*variance = ty::Invariant;
diff --git a/compiler/rustc_hir_analysis/src/variance/terms.rs b/compiler/rustc_hir_analysis/src/variance/terms.rs
index 3b286bb9c..1a8ec5f08 100644
--- a/compiler/rustc_hir_analysis/src/variance/terms.rs
+++ b/compiler/rustc_hir_analysis/src/variance/terms.rs
@@ -12,7 +12,7 @@
use rustc_arena::DroplessArena;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{LocalDefId, LocalDefIdMap};
-use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
use std::fmt;
use self::VarianceTerm::*;
@@ -32,8 +32,8 @@ pub enum VarianceTerm<'a> {
impl<'a> fmt::Debug for VarianceTerm<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
- ConstantTerm(c1) => write!(f, "{:?}", c1),
- TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2),
+ ConstantTerm(c1) => write!(f, "{c1:?}"),
+ TransformTerm(v1, v2) => write!(f, "({v1:?} \u{00D7} {v2:?})"),
InferredTerm(id) => write!(f, "[{}]", {
let InferredIndex(i) = id;
i
@@ -97,6 +97,11 @@ pub fn determine_parameters_to_be_inferred<'a, 'tcx>(
}
}
DefKind::Fn | DefKind::AssocFn => terms_cx.add_inferreds_for_item(def_id),
+ DefKind::TyAlias { lazy }
+ if lazy || tcx.type_of(def_id).instantiate_identity().has_opaque_types() =>
+ {
+ terms_cx.add_inferreds_for_item(def_id)
+ }
_ => {}
}
}
diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs
index a699cd6c9..89efdc269 100644
--- a/compiler/rustc_hir_pretty/src/lib.rs
+++ b/compiler/rustc_hir_pretty/src/lib.rs
@@ -420,12 +420,13 @@ impl<'a> State<'a> {
fn print_associated_const(
&mut self,
ident: Ident,
+ generics: &hir::Generics<'_>,
ty: &hir::Ty<'_>,
default: Option<hir::BodyId>,
) {
- self.head("");
self.word_space("const");
self.print_ident(ident);
+ self.print_generic_params(generics.params);
self.word_space(":");
self.print_type(ty);
if let Some(expr) = default {
@@ -433,6 +434,7 @@ impl<'a> State<'a> {
self.word_space("=");
self.ann.nested(self, Nested::Body(expr));
}
+ self.print_where_clause(generics);
self.word(";")
}
@@ -532,9 +534,10 @@ impl<'a> State<'a> {
self.word(";");
self.end(); // end the outer cbox
}
- hir::ItemKind::Const(ty, expr) => {
+ hir::ItemKind::Const(ty, generics, expr) => {
self.head("const");
self.print_ident(item.ident);
+ self.print_generic_params(generics.params);
self.word_space(":");
self.print_type(ty);
self.space();
@@ -542,6 +545,7 @@ impl<'a> State<'a> {
self.word_space("=");
self.ann.nested(self, Nested::Body(expr));
+ self.print_where_clause(generics);
self.word(";");
self.end(); // end the outer cbox
}
@@ -622,7 +626,6 @@ impl<'a> State<'a> {
unsafety,
polarity,
defaultness,
- constness,
defaultness_span: _,
generics,
ref of_trait,
@@ -639,10 +642,6 @@ impl<'a> State<'a> {
self.space();
}
- if constness == hir::Constness::Const {
- self.word_nbsp("const");
- }
-
if let hir::ImplPolarity::Negative(_) = polarity {
self.word("!");
}
@@ -836,7 +835,7 @@ impl<'a> State<'a> {
self.print_outer_attributes(self.attrs(ti.hir_id()));
match ti.kind {
hir::TraitItemKind::Const(ty, default) => {
- self.print_associated_const(ti.ident, ty, default);
+ self.print_associated_const(ti.ident, ti.generics, ty, default);
}
hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Required(arg_names)) => {
self.print_method_sig(ti.ident, sig, ti.generics, arg_names, None);
@@ -865,7 +864,7 @@ impl<'a> State<'a> {
match ii.kind {
hir::ImplItemKind::Const(ty, expr) => {
- self.print_associated_const(ii.ident, ty, Some(expr));
+ self.print_associated_const(ii.ident, ii.generics, ty, Some(expr));
}
hir::ImplItemKind::Fn(ref sig, body) => {
self.head("");
@@ -1522,7 +1521,7 @@ impl<'a> State<'a> {
self.word(".");
self.print_ident(ident);
}
- hir::ExprKind::Index(expr, index) => {
+ hir::ExprKind::Index(expr, index, _) => {
self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX);
self.word("[");
self.print_expr(index);
@@ -2415,7 +2414,7 @@ fn contains_exterior_struct_lit(value: &hir::Expr<'_>) -> bool {
| hir::ExprKind::Cast(x, _)
| hir::ExprKind::Type(x, _)
| hir::ExprKind::Field(x, _)
- | hir::ExprKind::Index(x, _) => {
+ | hir::ExprKind::Index(x, _, _) => {
// `&X { y: 1 }, X { y: 1 }.y`
contains_exterior_struct_lit(x)
}
diff --git a/compiler/rustc_hir_typeck/Cargo.toml b/compiler/rustc_hir_typeck/Cargo.toml
index 13e1ea31c..ce91d023a 100644
--- a/compiler/rustc_hir_typeck/Cargo.toml
+++ b/compiler/rustc_hir_typeck/Cargo.toml
@@ -9,6 +9,7 @@ edition = "2021"
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
tracing = "0.1"
rustc_ast = { path = "../rustc_ast" }
+rustc_attr = { path = "../rustc_attr" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_graphviz = { path = "../rustc_graphviz" }
diff --git a/compiler/rustc_hir_typeck/messages.ftl b/compiler/rustc_hir_typeck/messages.ftl
index 3d012a15a..2281343e2 100644
--- a/compiler/rustc_hir_typeck/messages.ftl
+++ b/compiler/rustc_hir_typeck/messages.ftl
@@ -77,6 +77,10 @@ hir_typeck_note_edition_guide = for more on editions, read https://doc.rust-lang
hir_typeck_op_trait_generic_params = `{$method_name}` must not have any generic parameters
+hir_typeck_option_result_asref = use `{$def_path}::as_ref` to convert `{$expected_ty}` to `{$expr_ty}`
+hir_typeck_option_result_cloned = use `{$def_path}::cloned` to clone the value inside the `{$def_path}`
+hir_typeck_option_result_copied = use `{$def_path}::copied` to copy the value inside the `{$def_path}`
+
hir_typeck_return_stmt_outside_of_fn_body =
{$statement_kind} statement outside of function body
.encl_body_label = the {$statement_kind} is part of this body...
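
The three new Fluent messages drive suggestions of the following shape; the function below is an illustrative example of the `as_ref` case, not code from this patch:

```rust
// When a `&Option<String>` is used where an `Option<&String>` (or something
// computed from one) is expected, the diagnostic points at `Option::as_ref`;
// `copied`/`cloned` cover the analogous `Option<&T>` -> `Option<T>` cases.
fn name_len(name: &Option<String>) -> usize {
    name.as_ref().map(|s| s.len()).unwrap_or(0)
}
```
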
diff --git a/compiler/rustc_hir_typeck/src/_match.rs b/compiler/rustc_hir_typeck/src/_match.rs
index e8720a5da..7ad9f51ba 100644
--- a/compiler/rustc_hir_typeck/src/_match.rs
+++ b/compiler/rustc_hir_typeck/src/_match.rs
@@ -41,7 +41,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// #55810: Type check patterns first so we get types for all bindings.
let scrut_span = scrut.span.find_ancestor_inside(expr.span).unwrap_or(scrut.span);
for arm in arms {
- self.check_pat_top(&arm.pat, scrutinee_ty, Some(scrut_span), Some(scrut));
+ self.check_pat_top(&arm.pat, scrutinee_ty, Some(scrut_span), Some(scrut), None);
}
// Now typecheck the blocks.
@@ -107,7 +107,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let (span, code) = match prior_arm {
// The reason for the first arm to fail is not that the match arms diverge,
// but rather that there's a prior obligation that doesn't hold.
- None => (arm_span, ObligationCauseCode::BlockTailExpression(arm.body.hir_id)),
+ None => {
+ (arm_span, ObligationCauseCode::BlockTailExpression(arm.body.hir_id, match_src))
+ }
Some((prior_arm_block_id, prior_arm_ty, prior_arm_span)) => (
expr.span,
ObligationCauseCode::MatchExpressionArm(Box::new(MatchExpressionArmCause {
@@ -120,7 +122,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
scrut_span: scrut.span,
source: match_src,
prior_arms: other_arms.clone(),
- scrut_hir_id: scrut.hir_id,
opt_suggest_box_span,
})),
),
@@ -136,15 +137,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&cause,
Some(&arm.body),
arm_ty,
- Some(&mut |err| {
- self.suggest_removing_semicolon_for_coerce(
- err,
- expr,
- orig_expected,
- arm_ty,
- prior_arm,
- )
- }),
+ |err| self.suggest_removing_semicolon_for_coerce(err, expr, arm_ty, prior_arm),
false,
);
@@ -153,7 +146,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
other_arms.remove(0);
}
- prior_arm = Some((arm_block_id, arm_ty, arm_span));
+ if !arm_ty.is_never() {
+ // When a match arm has type `!`, then it doesn't influence the expected type for
+ // the following arm. If all of the prior arms are `!`, then the influence comes
+ // from elsewhere and we shouldn't point to any previous arm.
+ prior_arm = Some((arm_block_id, arm_ty, arm_span));
+ }
}
// If all of the arms in the `match` diverge,
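
The `!arm_ty.is_never()` guard above keeps diverging arms from becoming the "expected because of this arm" note. A small stable illustration:

```rust
// The first arm diverges (`panic!` has type `!`), so it does not pin down the
// type of the `match`; the expectation comes from the second arm and the
// function's return type, and diagnostics should point there instead.
fn predecessor(n: u32) -> u32 {
    match n {
        0 => panic!("zero has no predecessor"),
        _ => n - 1,
    }
}
```
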
@@ -181,18 +179,24 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
diag: &mut Diagnostic,
expr: &hir::Expr<'tcx>,
- expectation: Expectation<'tcx>,
arm_ty: Ty<'tcx>,
prior_arm: Option<(Option<hir::HirId>, Ty<'tcx>, Span)>,
) {
let hir = self.tcx.hir();
// First, check that we're actually in the tail of a function.
- let Some(body_id) = hir.maybe_body_owned_by(self.body_id) else { return; };
+ let Some(body_id) = hir.maybe_body_owned_by(self.body_id) else {
+ return;
+ };
let body = hir.body(body_id);
- let hir::ExprKind::Block(block, _) = body.value.kind else { return; };
- let Some(hir::Stmt { kind: hir::StmtKind::Semi(last_expr), .. })
- = block.innermost_block().stmts.last() else { return; };
+ let hir::ExprKind::Block(block, _) = body.value.kind else {
+ return;
+ };
+ let Some(hir::Stmt { kind: hir::StmtKind::Semi(last_expr), span: semi_span, .. }) =
+ block.innermost_block().stmts.last()
+ else {
+ return;
+ };
if last_expr.hir_id != expr.hir_id {
return;
}
@@ -201,8 +205,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let Some(ret) = hir
.find_by_def_id(self.body_id)
.and_then(|owner| owner.fn_decl())
- .map(|decl| decl.output.span()) else { return; };
- let Expectation::IsLast(stmt) = expectation else {
+ .map(|decl| decl.output.span())
+ else {
return;
};
@@ -221,7 +225,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return;
}
- let semi_span = expr.span.shrink_to_hi().with_hi(stmt.hi());
+ let semi_span = expr.span.shrink_to_hi().with_hi(semi_span.hi());
let mut ret_span: MultiSpan = semi_span.into();
ret_span.push_span_label(
expr.span,
@@ -269,7 +273,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
coercion.coerce_forced_unit(
self,
&cause,
- &mut |err| {
+ |err| {
if let Some((span, msg)) = &ret_reason {
err.span_label(*span, msg.clone());
} else if let ExprKind::Block(block, _) = &then_expr.kind
@@ -508,9 +512,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
span,
kind: TypeVariableOriginKind::OpaqueTypeInference(rpit_def_id),
..
- } = self.type_var_origin(expected)? else { return None; };
+ } = self.type_var_origin(expected)?
+ else {
+ return None;
+ };
- let Some(rpit_local_def_id) = rpit_def_id.as_local() else { return None; };
+ let Some(rpit_local_def_id) = rpit_def_id.as_local() else {
+ return None;
+ };
if !matches!(
self.tcx.hir().expect_item(rpit_local_def_id).expect_opaque_ty().origin,
hir::OpaqueTyOrigin::FnReturn(..)
@@ -520,12 +529,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let sig = self.body_fn_sig()?;
- let substs = sig.output().walk().find_map(|arg| {
+ let args = sig.output().walk().find_map(|arg| {
if let ty::GenericArgKind::Type(ty) = arg.unpack()
- && let ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) = *ty.kind()
+ && let ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) = *ty.kind()
&& def_id == rpit_def_id
{
- Some(substs)
+ Some(args)
} else {
None
}
@@ -539,23 +548,22 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
for (clause, _) in self
.tcx
.explicit_item_bounds(rpit_def_id)
- .subst_iter_copied(self.tcx, substs)
+ .iter_instantiated_copied(self.tcx, args)
{
let pred = clause.kind().rebind(match clause.kind().skip_binder() {
ty::ClauseKind::Trait(trait_pred) => {
- // FIXME(rpitit): This will need to be fixed when we move to associated types
assert!(matches!(
*trait_pred.trait_ref.self_ty().kind(),
- ty::Alias(_, ty::AliasTy { def_id, substs: alias_substs, .. })
- if def_id == rpit_def_id && substs == alias_substs
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args: alias_args, .. })
+ if def_id == rpit_def_id && args == alias_args
));
ty::ClauseKind::Trait(trait_pred.with_self_ty(self.tcx, ty))
}
ty::ClauseKind::Projection(mut proj_pred) => {
assert!(matches!(
*proj_pred.projection_ty.self_ty().kind(),
- ty::Alias(_, ty::AliasTy { def_id, substs: alias_substs, .. })
- if def_id == rpit_def_id && substs == alias_substs
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args: alias_args, .. })
+ if def_id == rpit_def_id && args == alias_args
));
proj_pred = proj_pred.with_self_ty(self.tcx, ty);
ty::ClauseKind::Projection(proj_pred)
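
The opaque-type handling above (instantiating `explicit_item_bounds` and re-targeting them with `with_self_ty`) is exercised whenever a `match` is the tail expression of a function returning `impl Trait`; a minimal stable shape:

```rust
use std::fmt::Display;

// Every arm must coerce to the same return-position `impl Display` opaque
// type; its item bounds are checked against the concrete arm type.
fn pick(flag: bool) -> impl Display {
    match flag {
        true => "yes".to_string(),
        false => "no".to_string(),
    }
}
```
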
diff --git a/compiler/rustc_hir_typeck/src/callee.rs b/compiler/rustc_hir_typeck/src/callee.rs
index f306653c1..02371f85a 100644
--- a/compiler/rustc_hir_typeck/src/callee.rs
+++ b/compiler/rustc_hir_typeck/src/callee.rs
@@ -21,7 +21,7 @@ use rustc_infer::{
use rustc_middle::ty::adjustment::{
Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
};
-use rustc_middle::ty::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::{sym, Ident};
@@ -149,14 +149,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return Some(CallStep::Builtin(adjusted_ty));
}
- ty::Closure(def_id, substs) => {
+ ty::Closure(def_id, args) => {
let def_id = def_id.expect_local();
// Check whether this is a call to a closure where we
// haven't yet decided on whether the closure is fn vs
// fnmut vs fnonce. If so, we have to defer further processing.
- if self.closure_kind(substs).is_none() {
- let closure_sig = substs.as_closure().sig();
+ if self.closure_kind(args).is_none() {
+ let closure_sig = args.as_closure().sig();
let closure_sig = self.instantiate_binder_with_fresh_vars(
call_expr.span,
infer::FnCall,
@@ -171,7 +171,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
adjusted_ty,
adjustments,
fn_sig: closure_sig,
- closure_substs: substs,
+ closure_args: args,
},
);
return Some(CallStep::DeferredClosure(def_id, closure_sig));
@@ -380,16 +380,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expected: Expectation<'tcx>,
) -> Ty<'tcx> {
let (fn_sig, def_id) = match *callee_ty.kind() {
- ty::FnDef(def_id, substs) => {
- self.enforce_context_effects(call_expr.hir_id, call_expr.span, def_id, substs);
- let fn_sig = self.tcx.fn_sig(def_id).subst(self.tcx, substs);
+ ty::FnDef(def_id, args) => {
+ self.enforce_context_effects(call_expr.hir_id, call_expr.span, def_id, args);
+ let fn_sig = self.tcx.fn_sig(def_id).instantiate(self.tcx, args);
// Unit testing: function items annotated with
// `#[rustc_evaluate_where_clauses]` trigger special output
// to let us test the trait evaluation system.
if self.tcx.has_attr(def_id, sym::rustc_evaluate_where_clauses) {
let predicates = self.tcx.predicates_of(def_id);
- let predicates = predicates.instantiate(self.tcx, substs);
+ let predicates = predicates.instantiate(self.tcx, args);
for (predicate, predicate_span) in predicates {
let obligation = Obligation::new(
self.tcx,
@@ -402,7 +402,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.sess
.struct_span_err(
callee_expr.span,
- format!("evaluate({:?}) = {:?}", predicate, result),
+ format!("evaluate({predicate:?}) = {result:?}"),
)
.span_label(predicate_span, "predicate")
.emit();
@@ -499,15 +499,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expected: Expectation<'tcx>,
) {
if let [callee_expr, rest @ ..] = arg_exprs {
- let Some(callee_ty) = self.typeck_results.borrow().expr_ty_adjusted_opt(callee_expr) else {
+ let Some(callee_ty) = self.typeck_results.borrow().expr_ty_adjusted_opt(callee_expr)
+ else {
return;
};
// First, do a probe with `IsSuggestion(true)` to avoid emitting
// any strange errors. If it's successful, then we'll do a true
// method lookup.
- let Ok(pick) = self
- .lookup_probe_for_diagnostic(
+ let Ok(pick) = self.lookup_probe_for_diagnostic(
segment.ident,
callee_ty,
call_expr,
@@ -531,8 +531,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return;
}
- let up_to_rcvr_span = segment.ident.span.until(callee_expr.span);
- let rest_span = callee_expr.span.shrink_to_hi().to(call_expr.span.shrink_to_hi());
+ let Some(callee_expr_span) = callee_expr.span.find_ancestor_inside(call_expr.span)
+ else {
+ return;
+ };
+ let up_to_rcvr_span = segment.ident.span.until(callee_expr_span);
+ let rest_span = callee_expr_span.shrink_to_hi().to(call_expr.span.shrink_to_hi());
let rest_snippet = if let Some(first) = rest.first() {
self.tcx
.sess
@@ -581,12 +585,21 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
callee_ty: Ty<'tcx>,
arg_exprs: &'tcx [hir::Expr<'tcx>],
) -> ErrorGuaranteed {
+ // Callee probe fails when APIT references errors, so suppress those
+ // errors here.
+ if let Some((_, _, args)) = self.extract_callable_info(callee_ty)
+ && let Err(err) = args.error_reported()
+ {
+ return err;
+ }
+
let mut unit_variant = None;
if let hir::ExprKind::Path(qpath) = &callee_expr.kind
&& let Res::Def(def::DefKind::Ctor(kind, CtorKind::Const), _)
= self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
// Only suggest removing parens if there are no arguments
&& arg_exprs.is_empty()
+ && call_expr.span.contains(callee_expr.span)
{
let descr = match kind {
def::CtorOf::Struct => "struct",
@@ -633,18 +646,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
}
hir::ExprKind::Call(ref inner_callee, _) => {
- // If the call spans more than one line and the callee kind is
- // itself another `ExprCall`, that's a clue that we might just be
- // missing a semicolon (Issue #51055)
- let call_is_multiline = self.tcx.sess.source_map().is_multiline(call_expr.span);
- if call_is_multiline {
- err.span_suggestion(
- callee_expr.span.shrink_to_hi(),
- "consider using a semicolon here",
- ";",
- Applicability::MaybeIncorrect,
- );
- }
if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.kind {
inner_callee_path = Some(inner_qpath);
self.typeck_results.borrow().qpath_res(inner_qpath, inner_callee.hir_id)
@@ -656,6 +657,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
if !self.maybe_suggest_bad_array_definition(&mut err, call_expr, callee_expr) {
+ // If the call spans more than one line and the callee kind is
+ // itself another `ExprCall`, that's a clue that we might just be
+ // missing a semicolon (#51055, #106515).
+ let call_is_multiline = self
+ .tcx
+ .sess
+ .source_map()
+ .is_multiline(call_expr.span.with_lo(callee_expr.span.hi()))
+ && call_expr.span.ctxt() == callee_expr.span.ctxt();
+ if call_is_multiline {
+ err.span_suggestion(
+ callee_expr.span.shrink_to_hi(),
+ "consider using a semicolon here to finish the statement",
+ ";",
+ Applicability::MaybeIncorrect,
+ );
+ }
if let Some((maybe_def, output_ty, _)) = self.extract_callable_info(callee_ty)
&& !self.type_is_sized_modulo_regions(self.param_env, output_ty)
{
@@ -751,13 +769,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
call_expr_hir: HirId,
span: Span,
callee_did: DefId,
- callee_substs: SubstsRef<'tcx>,
+ callee_args: GenericArgsRef<'tcx>,
) {
let tcx = self.tcx;
- if !tcx.features().effects || tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
- return;
- }
+ // fast-reject if callee doesn't have the host effect param (non-const)
+ let generics = tcx.generics_of(callee_did);
+ let Some(host_effect_index) = generics.host_effect_index else { return };
+
+ // if the callee does have the param, we need to equate the param to some const
+ // value no matter whether the effects feature is enabled in the local crate,
+ // because inference will fail if we don't.
+ let mut host_always_on =
+ !tcx.features().effects || tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you;
// Compute the constness required by the context.
let context = tcx.hir().enclosing_body_owner(call_expr_hir);
@@ -768,33 +792,30 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if tcx.has_attr(context.to_def_id(), sym::rustc_do_not_const_check) {
trace!("do not const check this context");
- return;
+ host_always_on = true;
}
let effect = match const_context {
+ _ if host_always_on => tcx.consts.true_,
Some(hir::ConstContext::Static(_) | hir::ConstContext::Const) => tcx.consts.false_,
Some(hir::ConstContext::ConstFn) => {
- let substs = ty::InternalSubsts::identity_for_item(tcx, context);
- substs.host_effect_param().expect("ConstContext::Maybe must have host effect param")
+ let args = ty::GenericArgs::identity_for_item(tcx, context);
+ args.host_effect_param().expect("ConstContext::Maybe must have host effect param")
}
None => tcx.consts.true_,
};
- let generics = tcx.generics_of(callee_did);
+ trace!(?effect, ?generics, ?callee_args);
- trace!(?effect, ?generics, ?callee_substs);
-
- if let Some(idx) = generics.host_effect_index {
- let param = callee_substs.const_at(idx);
- let cause = self.misc(span);
- match self.at(&cause, self.param_env).eq(infer::DefineOpaqueTypes::No, effect, param) {
- Ok(infer::InferOk { obligations, value: () }) => {
- self.register_predicates(obligations);
- }
- Err(e) => {
- // FIXME(effects): better diagnostic
- self.err_ctxt().report_mismatched_consts(&cause, effect, param, e).emit();
- }
+ let param = callee_args.const_at(host_effect_index);
+ let cause = self.misc(span);
+ match self.at(&cause, self.param_env).eq(infer::DefineOpaqueTypes::No, effect, param) {
+ Ok(infer::InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ }
+ Err(e) => {
+ // FIXME(effects): better diagnostic
+ self.err_ctxt().report_mismatched_consts(&cause, effect, param, e).emit();
}
}
}
@@ -827,7 +848,7 @@ pub struct DeferredCallResolution<'tcx> {
adjusted_ty: Ty<'tcx>,
adjustments: Vec<Adjustment<'tcx>>,
fn_sig: ty::FnSig<'tcx>,
- closure_substs: SubstsRef<'tcx>,
+ closure_args: GenericArgsRef<'tcx>,
}
impl<'a, 'tcx> DeferredCallResolution<'tcx> {
@@ -836,7 +857,7 @@ impl<'a, 'tcx> DeferredCallResolution<'tcx> {
// we should not be invoked until the closure kind has been
// determined by upvar inference
- assert!(fcx.closure_kind(self.closure_substs).is_some());
+ assert!(fcx.closure_kind(self.closure_args).is_some());
// We may now know enough to figure out fn vs fnmut etc.
match fcx.try_overloaded_call_traits(self.call_expr, self.adjusted_ty, None) {
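
The relocated multi-line heuristic earlier in this file (issues #51055 and #106515) targets the classic missing-semicolon parse; it is sketched in comments because the whole point is that the code does not compile:

```rust
// fn build() -> (u32, u32) {
//     let _cfg = make_config()    // <- missing `;`
//     (1, 2)                      // parsed as `make_config()(1, 2)`, i.e. a
// }                               //    call of the previous value, so the new
//                                 //    "consider using a semicolon here to
//                                 //    finish the statement" help applies
```
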
diff --git a/compiler/rustc_hir_typeck/src/cast.rs b/compiler/rustc_hir_typeck/src/cast.rs
index 633933317..31a03fabe 100644
--- a/compiler/rustc_hir_typeck/src/cast.rs
+++ b/compiler/rustc_hir_typeck/src/cast.rs
@@ -103,10 +103,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Ok(match *t.kind() {
ty::Slice(_) | ty::Str => Some(PointerKind::Length),
ty::Dynamic(ref tty, _, ty::Dyn) => Some(PointerKind::VTable(tty.principal_def_id())),
- ty::Adt(def, substs) if def.is_struct() => match def.non_enum_variant().tail_opt() {
+ ty::Adt(def, args) if def.is_struct() => match def.non_enum_variant().tail_opt() {
None => Some(PointerKind::Thin),
Some(f) => {
- let field_ty = self.field_ty(span, f, substs);
+ let field_ty = self.field_ty(span, f, args);
self.pointer_kind(field_ty, span)?
}
},
@@ -144,7 +144,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let reported = self
.tcx
.sess
- .delay_span_bug(span, format!("`{:?}` should be sized but is not?", t));
+ .delay_span_bug(span, format!("`{t:?}` should be sized but is not?"));
return Err(reported);
}
})
@@ -389,34 +389,26 @@ impl<'a, 'tcx> CastCheck<'tcx> {
if let ty::Ref(reg, cast_ty, mutbl) = *self.cast_ty.kind() {
if let ty::RawPtr(TypeAndMut { ty: expr_ty, .. }) = *self.expr_ty.kind()
&& fcx
- .try_coerce(
- self.expr,
+ .can_coerce(
Ty::new_ref(fcx.tcx,
fcx.tcx.lifetimes.re_erased,
TypeAndMut { ty: expr_ty, mutbl },
),
self.cast_ty,
- AllowTwoPhase::No,
- None,
)
- .is_ok()
{
sugg = Some((format!("&{}*", mutbl.prefix_str()), cast_ty == expr_ty));
} else if let ty::Ref(expr_reg, expr_ty, expr_mutbl) = *self.expr_ty.kind()
&& expr_mutbl == Mutability::Not
&& mutbl == Mutability::Mut
&& fcx
- .try_coerce(
- self.expr,
+ .can_coerce(
Ty::new_ref(fcx.tcx,
expr_reg,
TypeAndMut { ty: expr_ty, mutbl: Mutability::Mut },
),
self.cast_ty,
- AllowTwoPhase::No,
- None,
)
- .is_ok()
{
sugg_mutref = true;
}
@@ -424,30 +416,22 @@ impl<'a, 'tcx> CastCheck<'tcx> {
if !sugg_mutref
&& sugg == None
&& fcx
- .try_coerce(
- self.expr,
+ .can_coerce(
Ty::new_ref(fcx.tcx,reg, TypeAndMut { ty: self.expr_ty, mutbl }),
self.cast_ty,
- AllowTwoPhase::No,
- None,
)
- .is_ok()
{
sugg = Some((format!("&{}", mutbl.prefix_str()), false));
}
} else if let ty::RawPtr(TypeAndMut { mutbl, .. }) = *self.cast_ty.kind()
&& fcx
- .try_coerce(
- self.expr,
+ .can_coerce(
Ty::new_ref(fcx.tcx,
fcx.tcx.lifetimes.re_erased,
TypeAndMut { ty: self.expr_ty, mutbl },
),
self.cast_ty,
- AllowTwoPhase::No,
- None,
)
- .is_ok()
{
sugg = Some((format!("&{}", mutbl.prefix_str()), false));
}
@@ -644,12 +628,12 @@ impl<'a, 'tcx> CastCheck<'tcx> {
err.span_suggestion(
self.cast_span,
"try casting to a reference instead",
- format!("&{}{}", mtstr, s),
+ format!("&{mtstr}{s}"),
Applicability::MachineApplicable,
);
}
Err(_) => {
- let msg = format!("did you mean `&{}{}`?", mtstr, tstr);
+ let msg = format!("did you mean `&{mtstr}{tstr}`?");
err.span_help(self.cast_span, msg);
}
}
@@ -705,10 +689,10 @@ impl<'a, 'tcx> CastCheck<'tcx> {
)
}),
|lint| {
- lint.help(format!(
+ lint.help(
"cast can be replaced by coercion; this might \
- require a temporary variable"
- ))
+ require a temporary variable",
+ )
},
);
}
@@ -760,7 +744,7 @@ impl<'a, 'tcx> CastCheck<'tcx> {
ty::FnDef(..) => {
// Attempt a coercion to a fn pointer type.
let f = fcx.normalize(self.expr_span, self.expr_ty.fn_sig(fcx.tcx));
- let res = fcx.try_coerce(
+ let res = fcx.coerce(
self.expr,
self.expr_ty,
Ty::new_fn_ptr(fcx.tcx, f),
@@ -860,7 +844,7 @@ impl<'a, 'tcx> CastCheck<'tcx> {
(_, DynStar) => {
if fcx.tcx.features().dyn_star {
- bug!("should be handled by `try_coerce`")
+ bug!("should be handled by `coerce`")
} else {
Err(CastError::IllegalCast)
}
@@ -956,7 +940,7 @@ impl<'a, 'tcx> CastCheck<'tcx> {
// Coerce to a raw pointer so that we generate AddressOf in MIR.
let array_ptr_type = Ty::new_ptr(fcx.tcx, m_expr);
- fcx.try_coerce(self.expr, self.expr_ty, array_ptr_type, AllowTwoPhase::No, None)
+ fcx.coerce(self.expr, self.expr_ty, array_ptr_type, AllowTwoPhase::No, None)
.unwrap_or_else(|_| {
bug!(
"could not cast from reference to array to pointer to array ({:?} to {:?})",
@@ -992,7 +976,7 @@ impl<'a, 'tcx> CastCheck<'tcx> {
}
fn try_coercion_cast(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<(), ty::error::TypeError<'tcx>> {
- match fcx.try_coerce(self.expr, self.expr_ty, self.cast_ty, AllowTwoPhase::No, None) {
+ match fcx.coerce(self.expr, self.expr_ty, self.cast_ty, AllowTwoPhase::No, None) {
Ok(_) => Ok(()),
Err(err) => Err(err),
}
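
The "cast can be replaced by coercion" help kept above belongs to the trivial-cast lint; a small example that triggers it, assuming the lint is enabled:

```rust
#![warn(trivial_casts)]

fn main() {
    let x = 5u32;
    // Trivial cast: `&x` already coerces to `*const u32`, so the lint suggests
    // dropping the `as` (possibly via `let p: *const u32 = &x;`).
    let _p = &x as *const u32;
}
```
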
diff --git a/compiler/rustc_hir_typeck/src/check.rs b/compiler/rustc_hir_typeck/src/check.rs
index 8b57e311f..1fc1e5aca 100644
--- a/compiler/rustc_hir_typeck/src/check.rs
+++ b/compiler/rustc_hir_typeck/src/check.rs
@@ -80,7 +80,7 @@ pub(super) fn check_fn<'a, 'tcx>(
let va_list_did = tcx.require_lang_item(LangItem::VaList, Some(span));
let region = fcx.next_region_var(RegionVariableOrigin::MiscVariable(span));
- tcx.type_of(va_list_did).subst(tcx, &[region.into()])
+ tcx.type_of(va_list_did).instantiate(tcx, &[region.into()])
});
// Add formal parameters.
@@ -89,7 +89,7 @@ pub(super) fn check_fn<'a, 'tcx>(
for (idx, (param_ty, param)) in inputs_fn.chain(maybe_va_list).zip(body.params).enumerate() {
// Check the pattern.
let ty_span = try { inputs_hir?.get(idx)?.span };
- fcx.check_pat_top(&param.pat, param_ty, ty_span, None);
+ fcx.check_pat_top(&param.pat, param_ty, ty_span, None, None);
// Check that argument is Sized.
if !params_can_be_unsized {
diff --git a/compiler/rustc_hir_typeck/src/closure.rs b/compiler/rustc_hir_typeck/src/closure.rs
index 78a9ac49d..b19fb6da6 100644
--- a/compiler/rustc_hir_typeck/src/closure.rs
+++ b/compiler/rustc_hir_typeck/src/closure.rs
@@ -10,8 +10,8 @@ use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKi
use rustc_infer::infer::{DefineOpaqueTypes, LateBoundRegionConversionTime};
use rustc_infer::infer::{InferOk, InferResult};
use rustc_macros::{TypeFoldable, TypeVisitable};
-use rustc_middle::ty::subst::InternalSubsts;
use rustc_middle::ty::visit::{TypeVisitable, TypeVisitableExt};
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitor};
use rustc_span::def_id::LocalDefId;
use rustc_span::source_map::Span;
@@ -81,7 +81,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
debug!(?bound_sig, ?liberated_sig);
- let mut fcx = FnCtxt::new(self, self.param_env.without_const(), closure.def_id);
+ let mut fcx = FnCtxt::new(self, self.param_env, closure.def_id);
let generator_types = check_fn(
&mut fcx,
liberated_sig,
@@ -93,7 +93,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
false,
);
- let parent_substs = InternalSubsts::identity_for_item(
+ let parent_args = GenericArgs::identity_for_item(
self.tcx,
self.tcx.typeck_root_def_id(expr_def_id.to_def_id()),
);
@@ -105,10 +105,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if let Some(GeneratorTypes { resume_ty, yield_ty, interior, movability }) = generator_types
{
- let generator_substs = ty::GeneratorSubsts::new(
+ let generator_args = ty::GeneratorArgs::new(
self.tcx,
- ty::GeneratorSubstsParts {
- parent_substs,
+ ty::GeneratorArgsParts {
+ parent_args,
resume_ty,
yield_ty,
return_ty: liberated_sig.output(),
@@ -120,7 +120,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return Ty::new_generator(
self.tcx,
expr_def_id.to_def_id(),
- generator_substs.substs,
+ generator_args.args,
movability,
);
}
@@ -151,17 +151,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}),
};
- let closure_substs = ty::ClosureSubsts::new(
+ let closure_args = ty::ClosureArgs::new(
self.tcx,
- ty::ClosureSubstsParts {
- parent_substs,
+ ty::ClosureArgsParts {
+ parent_args,
closure_kind_ty,
closure_sig_as_fn_ptr_ty: Ty::new_fn_ptr(self.tcx, sig),
tupled_upvars_ty,
},
);
- Ty::new_closure(self.tcx, expr_def_id.to_def_id(), closure_substs.substs)
+ Ty::new_closure(self.tcx, expr_def_id.to_def_id(), closure_args.args)
}
/// Given the expected type, figures out what it can about this closure we
@@ -172,12 +172,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expected_ty: Ty<'tcx>,
) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) {
match *expected_ty.kind() {
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => self
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => self
.deduce_closure_signature_from_predicates(
expected_ty,
self.tcx
.explicit_item_bounds(def_id)
- .subst_iter_copied(self.tcx, substs)
+ .iter_instantiated_copied(self.tcx, args)
.map(|(c, s)| (c.as_predicate(), s)),
),
ty::Dynamic(ref object_type, ..) => {
@@ -315,7 +315,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
let input_tys = if is_fn {
- let arg_param_ty = projection.skip_binder().projection_ty.substs.type_at(1);
+ let arg_param_ty = projection.skip_binder().projection_ty.args.type_at(1);
let arg_param_ty = self.resolve_vars_if_possible(arg_param_ty);
debug!(?arg_param_ty);
@@ -711,38 +711,32 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
};
+ let span = self.tcx.def_span(expr_def_id);
+
let output_ty = match *ret_ty.kind() {
ty::Infer(ty::TyVar(ret_vid)) => {
self.obligations_for_self_ty(ret_vid).find_map(|obligation| {
get_future_output(obligation.predicate, obligation.cause.span)
})?
}
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => self
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => self
.tcx
.explicit_item_bounds(def_id)
- .subst_iter_copied(self.tcx, substs)
+ .iter_instantiated_copied(self.tcx, args)
.find_map(|(p, s)| get_future_output(p.as_predicate(), s))?,
ty::Error(_) => return None,
- ty::Alias(ty::Projection, proj) if self.tcx.is_impl_trait_in_trait(proj.def_id) => self
- .tcx
- .explicit_item_bounds(proj.def_id)
- .subst_iter_copied(self.tcx, proj.substs)
- .find_map(|(p, s)| get_future_output(p.as_predicate(), s))?,
_ => span_bug!(
- self.tcx.def_span(expr_def_id),
+ span,
"async fn generator return type not an inference variable: {ret_ty}"
),
};
+ let output_ty = self.normalize(span, output_ty);
+
// async fn that have opaque types in their return type need to redo the conversion to inference variables
// as they fetch the still opaque version from the signature.
let InferOk { value: output_ty, obligations } = self
- .replace_opaque_types_with_inference_vars(
- output_ty,
- body_def_id,
- self.tcx.def_span(expr_def_id),
- self.param_env,
- );
+ .replace_opaque_types_with_inference_vars(output_ty, body_def_id, span, self.param_env);
self.register_predicates(obligations);
Some(output_ty)
@@ -800,7 +794,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// Converts the types that the user supplied, in case that doing
/// so should yield an error, but returns back a signature where
- /// all parameters are of type `TyErr`.
+ /// all parameters are of type `ty::Error`.
fn error_sig_of_closure(
&self,
decl: &hir::FnDecl<'_>,
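
`deduce_closure_signature_from_predicates` above now instantiates the opaque type's bounds via `iter_instantiated_copied`; the everyday shape that exercises this path is a closure returned as `impl Fn` (illustrative names):

```rust
// The closure parameter `x` has no annotation: its type is deduced from the
// `Fn(u32) -> u32` bound on the return-position opaque type.
fn adder() -> impl Fn(u32) -> u32 {
    |x| x + 1
}

fn main() {
    assert_eq!(adder()(41), 42);
}
```
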
diff --git a/compiler/rustc_hir_typeck/src/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs
index dc58d99ed..fca675ea9 100644
--- a/compiler/rustc_hir_typeck/src/coercion.rs
+++ b/compiler/rustc_hir_typeck/src/coercion.rs
@@ -46,13 +46,14 @@ use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKi
use rustc_infer::infer::{Coercion, DefineOpaqueTypes, InferOk, InferResult};
use rustc_infer::traits::{Obligation, PredicateObligation};
use rustc_middle::lint::in_external_macro;
+use rustc_middle::traits::BuiltinImplSource;
use rustc_middle::ty::adjustment::{
Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCoercion,
};
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::relate::RelateResult;
-use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::visit::TypeVisitableExt;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Ty, TypeAndMut};
use rustc_session::parse::feature_err;
use rustc_span::symbol::sym;
@@ -251,11 +252,11 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
- ty::Closure(closure_def_id_a, substs_a) => {
+ ty::Closure(closure_def_id_a, args_a) => {
// Non-capturing closures are coercible to
// function pointers or unsafe function pointers.
// It cannot convert closures that require unsafe.
- self.coerce_closure_to_fn(a, closure_def_id_a, substs_a, b)
+ self.coerce_closure_to_fn(a, closure_def_id_a, args_a, b)
}
_ => {
// Otherwise, just use unification rules.
@@ -509,9 +510,11 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
success(adjustments, ty, obligations)
}
- // &[T; n] or &mut [T; n] -> &[T]
- // or &mut [T; n] -> &mut [T]
- // or &Concrete -> &Trait, etc.
+ /// Performs [unsized coercion] by emulating a fulfillment loop on a
+ /// `CoerceUnsized` goal until all `CoerceUnsized` and `Unsize` goals
+ /// are successfully selected.
+ ///
+ /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
#[instrument(skip(self), level = "debug")]
fn coerce_unsized(&self, mut source: Ty<'tcx>, mut target: Ty<'tcx>) -> CoerceResult<'tcx> {
source = self.shallow_resolve(source);
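
The new doc comment describes `coerce_unsized` as a small fulfillment loop over `CoerceUnsized`/`Unsize` goals; the stable coercions it ends up performing are the familiar unsizing ones:

```rust
use std::fmt::Debug;

fn main() {
    let arr = [1u8, 2, 3];
    let _slice: &[u8] = &arr;                     // &[u8; 3] -> &[u8]
    let _object: &dyn Debug = &arr;               // &[u8; 3] -> &dyn Debug
    let _boxed: Box<dyn Debug> = Box::new(42u32); // Box<u32> -> Box<dyn Debug>
}
```
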
@@ -636,21 +639,6 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
Some(ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_pred)))
if traits.contains(&trait_pred.def_id()) =>
{
- if unsize_did == trait_pred.def_id() {
- let self_ty = trait_pred.self_ty();
- let unsize_ty = trait_pred.trait_ref.substs[1].expect_ty();
- if let (ty::Dynamic(ref data_a, ..), ty::Dynamic(ref data_b, ..)) =
- (self_ty.kind(), unsize_ty.kind())
- && data_a.principal_def_id() != data_b.principal_def_id()
- {
- debug!("coerce_unsized: found trait upcasting coercion");
- has_trait_upcasting_coercion = Some((self_ty, unsize_ty));
- }
- if let ty::Tuple(..) = unsize_ty.kind() {
- debug!("coerce_unsized: found unsized tuple coercion");
- has_unsized_tuple_coercion = true;
- }
- }
trait_pred
}
_ => {
@@ -658,13 +646,13 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
continue;
}
};
+ let trait_pred = self.resolve_vars_if_possible(trait_pred);
match selcx.select(&obligation.with(selcx.tcx(), trait_pred)) {
// Uncertain or unimplemented.
Ok(None) => {
if trait_pred.def_id() == unsize_did {
- let trait_pred = self.resolve_vars_if_possible(trait_pred);
let self_ty = trait_pred.self_ty();
- let unsize_ty = trait_pred.trait_ref.substs[1].expect_ty();
+ let unsize_ty = trait_pred.trait_ref.args[1].expect_ty();
debug!("coerce_unsized: ambiguous unsize case for {:?}", trait_pred);
match (self_ty.kind(), unsize_ty.kind()) {
(&ty::Infer(ty::TyVar(v)), ty::Dynamic(..))
@@ -701,20 +689,28 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
// be silent, as it causes a type mismatch later.
}
- Ok(Some(impl_source)) => queue.extend(impl_source.nested_obligations()),
+ Ok(Some(impl_source)) => {
+ // Some builtin coercions are still unstable so we detect
+ // these here and emit a feature error if coercion doesn't fail
+ // due to another reason.
+ match impl_source {
+ traits::ImplSource::Builtin(
+ BuiltinImplSource::TraitUpcasting { .. },
+ _,
+ ) => {
+ has_trait_upcasting_coercion =
+ Some((trait_pred.self_ty(), trait_pred.trait_ref.args.type_at(1)));
+ }
+ traits::ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, _) => {
+ has_unsized_tuple_coercion = true;
+ }
+ _ => {}
+ }
+ queue.extend(impl_source.nested_obligations())
+ }
}
}
- if has_unsized_tuple_coercion && !self.tcx.features().unsized_tuple_coercion {
- feature_err(
- &self.tcx.sess.parse_sess,
- sym::unsized_tuple_coercion,
- self.cause.span,
- "unsized tuple coercion is not stable enough for use and is subject to change",
- )
- .emit();
- }
-
if let Some((sub, sup)) = has_trait_upcasting_coercion
&& !self.tcx().features().trait_upcasting
{
@@ -730,6 +726,16 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
err.emit();
}
+ if has_unsized_tuple_coercion && !self.tcx.features().unsized_tuple_coercion {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::unsized_tuple_coercion,
+ self.cause.span,
+ "unsized tuple coercion is not stable enough for use and is subject to change",
+ )
+ .emit();
+ }
+
Ok(coercion)
}
@@ -916,7 +922,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
&self,
a: Ty<'tcx>,
closure_def_id_a: DefId,
- substs_a: SubstsRef<'tcx>,
+ args_a: GenericArgsRef<'tcx>,
b: Ty<'tcx>,
) -> CoerceResult<'tcx> {
//! Attempts to coerce from the type of a non-capturing closure
@@ -927,7 +933,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
match b.kind() {
// At this point we haven't done capture analysis, which means
- // that the ClosureSubsts just contains an inference variable instead
+ // that the ClosureArgs just contains an inference variable instead
// of tuple of captured types.
//
// All we care here is if any variable is being captured and not the exact paths,
@@ -944,7 +950,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
// `fn(arg0,arg1,...) -> _`
// or
// `unsafe fn(arg0,arg1,...) -> _`
- let closure_sig = substs_a.as_closure().sig();
+ let closure_sig = args_a.as_closure().sig();
let unsafety = fn_ty.unsafety();
let pointer_ty =
Ty::new_fn_ptr(self.tcx, self.tcx.signature_unclosure(closure_sig, unsafety));
@@ -999,15 +1005,21 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// adjusted type of the expression, if successful.
/// Adjustments are only recorded if the coercion succeeded.
/// The expressions *must not* have any preexisting adjustments.
- pub fn try_coerce(
+ pub fn coerce(
&self,
expr: &hir::Expr<'_>,
expr_ty: Ty<'tcx>,
- target: Ty<'tcx>,
+ mut target: Ty<'tcx>,
allow_two_phase: AllowTwoPhase,
cause: Option<ObligationCause<'tcx>>,
) -> RelateResult<'tcx, Ty<'tcx>> {
let source = self.try_structurally_resolve_type(expr.span, expr_ty);
+ if self.next_trait_solver() {
+ target = self.try_structurally_resolve_type(
+ cause.as_ref().map_or(expr.span, |cause| cause.span),
+ target,
+ );
+ }
debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target);
let cause =
@@ -1024,11 +1036,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
})
}
- /// Same as `try_coerce()`, but without side-effects.
+ /// Same as `coerce()`, but without side-effects.
///
/// Returns false if the coercion creates any obligations that result in
/// errors.
pub fn can_coerce(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> bool {
+ // FIXME(-Ztrait-solver=next): We need to structurally resolve both types here.
let source = self.resolve_vars_with_obligations(expr_ty);
debug!("coercion::can_with_predicates({:?} -> {:?})", source, target);
@@ -1093,8 +1106,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
where
E: AsCoercionSite,
{
- let prev_ty = self.resolve_vars_with_obligations(prev_ty);
- let new_ty = self.resolve_vars_with_obligations(new_ty);
+ let prev_ty = self.try_structurally_resolve_type(cause.span, prev_ty);
+ let new_ty = self.try_structurally_resolve_type(new.span, new_ty);
debug!(
"coercion::try_find_coercion_lub({:?}, {:?}, exprs={:?} exprs)",
prev_ty,
@@ -1109,10 +1122,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
// Special-case that coercion alone cannot handle:
- // Function items or non-capturing closures of differing IDs or InternalSubsts.
+ // Function items or non-capturing closures of differing IDs or GenericArgs.
let (a_sig, b_sig) = {
let is_capturing_closure = |ty: Ty<'tcx>| {
- if let &ty::Closure(closure_def_id, _substs) = ty.kind() {
+ if let &ty::Closure(closure_def_id, _args) = ty.kind() {
self.tcx.upvars_mentioned(closure_def_id.expect_local()).is_some()
} else {
false
@@ -1139,30 +1152,30 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
}
- (ty::Closure(_, substs), ty::FnDef(..)) => {
+ (ty::Closure(_, args), ty::FnDef(..)) => {
let b_sig = new_ty.fn_sig(self.tcx);
- let a_sig = self
- .tcx
- .signature_unclosure(substs.as_closure().sig(), b_sig.unsafety());
+ let a_sig =
+ self.tcx.signature_unclosure(args.as_closure().sig(), b_sig.unsafety());
(Some(a_sig), Some(b_sig))
}
- (ty::FnDef(..), ty::Closure(_, substs)) => {
+ (ty::FnDef(..), ty::Closure(_, args)) => {
let a_sig = prev_ty.fn_sig(self.tcx);
- let b_sig = self
- .tcx
- .signature_unclosure(substs.as_closure().sig(), a_sig.unsafety());
+ let b_sig =
+ self.tcx.signature_unclosure(args.as_closure().sig(), a_sig.unsafety());
(Some(a_sig), Some(b_sig))
}
- (ty::Closure(_, substs_a), ty::Closure(_, substs_b)) => (
- Some(self.tcx.signature_unclosure(
- substs_a.as_closure().sig(),
- hir::Unsafety::Normal,
- )),
- Some(self.tcx.signature_unclosure(
- substs_b.as_closure().sig(),
- hir::Unsafety::Normal,
- )),
- ),
+ (ty::Closure(_, args_a), ty::Closure(_, args_b)) => {
+ (
+ Some(self.tcx.signature_unclosure(
+ args_a.as_closure().sig(),
+ hir::Unsafety::Normal,
+ )),
+ Some(self.tcx.signature_unclosure(
+ args_b.as_closure().sig(),
+ hir::Unsafety::Normal,
+ )),
+ )
+ }
_ => (None, None),
}
}
@@ -1414,7 +1427,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
expression: &'tcx hir::Expr<'tcx>,
expression_ty: Ty<'tcx>,
) {
- self.coerce_inner(fcx, cause, Some(expression), expression_ty, None, false)
+ self.coerce_inner(fcx, cause, Some(expression), expression_ty, |_| {}, false)
}
/// Indicates that one of the inputs is a "forced unit". This
@@ -1433,7 +1446,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
&mut self,
fcx: &FnCtxt<'a, 'tcx>,
cause: &ObligationCause<'tcx>,
- augment_error: &mut dyn FnMut(&mut Diagnostic),
+ augment_error: impl FnOnce(&mut Diagnostic),
label_unit_as_expected: bool,
) {
self.coerce_inner(
@@ -1441,7 +1454,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
cause,
None,
Ty::new_unit(fcx.tcx),
- Some(augment_error),
+ augment_error,
label_unit_as_expected,
)
}
@@ -1456,7 +1469,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
cause: &ObligationCause<'tcx>,
expression: Option<&'tcx hir::Expr<'tcx>>,
mut expression_ty: Ty<'tcx>,
- augment_error: Option<&mut dyn FnMut(&mut Diagnostic)>,
+ augment_error: impl FnOnce(&mut Diagnostic),
label_expression_as_expected: bool,
) {
// Incorporate whatever type inference information we have
@@ -1481,7 +1494,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
// Special-case the first expression we are coercing.
// To be honest, I'm not entirely sure why we do this.
// We don't allow two-phase borrows, see comment in try_find_coercion_lub for why
- fcx.try_coerce(
+ fcx.coerce(
expression,
expression_ty,
self.expected_ty,
@@ -1590,7 +1603,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
);
err.span_label(cause.span, "return type is not `()`");
}
- ObligationCauseCode::BlockTailExpression(blk_id) => {
+ ObligationCauseCode::BlockTailExpression(blk_id, ..) => {
let parent_id = fcx.tcx.hir().parent_id(blk_id);
err = self.report_return_mismatched_types(
cause,
@@ -1635,14 +1648,9 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
}
}
- if let Some(augment_error) = augment_error {
- augment_error(&mut err);
- }
-
- let is_insufficiently_polymorphic =
- matches!(coercion_error, TypeError::RegionsInsufficientlyPolymorphic(..));
+ augment_error(&mut err);
- if !is_insufficiently_polymorphic && let Some(expr) = expression {
+ if let Some(expr) = expression {
fcx.emit_coerce_suggestions(
&mut err,
expr,
@@ -1670,7 +1678,9 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
expr: &hir::Expr<'tcx>,
ret_exprs: &Vec<&'tcx hir::Expr<'tcx>>,
) {
- let hir::ExprKind::Loop(_, _, _, loop_span) = expr.kind else { return;};
+ let hir::ExprKind::Loop(_, _, _, loop_span) = expr.kind else {
+ return;
+ };
let mut span: MultiSpan = vec![loop_span].into();
span.push_span_label(loop_span, "this might have zero elements to iterate on");
const MAXITER: usize = 3;
@@ -1738,7 +1748,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
) && !in_external_macro(fcx.tcx.sess, cond_expr.span)
&& !matches!(
cond_expr.kind,
- hir::ExprKind::Match(.., hir::MatchSource::TryDesugar)
+ hir::ExprKind::Match(.., hir::MatchSource::TryDesugar(_))
)
{
err.span_label(cond_expr.span, "expected this to be `()`");
@@ -1791,8 +1801,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
err.span_note(
sp,
format!(
- "return type inferred to be `{}` here",
- expected
+ "return type inferred to be `{expected}` here"
),
);
}
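Note on the `augment_error` hunks above: the parameter changes from `Option<&mut dyn FnMut(&mut Diagnostic)>` to `impl FnOnce(&mut Diagnostic)`, so callers pass `|_| {}` instead of `None` and a plain closure instead of `Some(&mut ...)`. A minimal standalone sketch of that signature change follows; it is not rustc code, and `Diag` is a stand-in type for `Diagnostic`.

struct Diag {
    notes: Vec<String>,
}

// Old shape: the callback is optional and passed as a mutable trait object.
fn report_old(augment: Option<&mut dyn FnMut(&mut Diag)>) -> Diag {
    let mut d = Diag { notes: Vec::new() };
    if let Some(f) = augment {
        f(&mut d);
    }
    d
}

// New shape: the callback is always supplied and called exactly once.
fn report_new(augment: impl FnOnce(&mut Diag)) -> Diag {
    let mut d = Diag { notes: Vec::new() };
    augment(&mut d);
    d
}

fn main() {
    let _ = report_old(None);
    let _ = report_old(Some(&mut |d: &mut Diag| d.notes.push("old".into())));
    let _ = report_new(|_| {});
    let _ = report_new(|d| d.notes.push("new".into()));
}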
diff --git a/compiler/rustc_hir_typeck/src/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs
index cc8198aab..2c16f21b4 100644
--- a/compiler/rustc_hir_typeck/src/demand.rs
+++ b/compiler/rustc_hir_typeck/src/demand.rs
@@ -15,7 +15,7 @@ use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::fold::BottomUpFolder;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, Article, AssocItem, Ty, TypeAndMut, TypeFoldable};
-use rustc_span::symbol::{sym, Symbol};
+use rustc_span::symbol::sym;
use rustc_span::{BytePos, Span, DUMMY_SP};
use rustc_trait_selection::infer::InferCtxtExt as _;
use rustc_trait_selection::traits::ObligationCause;
@@ -53,7 +53,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
|| self.suggest_no_capture_closure(err, expected, expr_ty)
|| self.suggest_boxing_when_appropriate(err, expr.span, expr.hir_id, expected, expr_ty)
|| self.suggest_block_to_brackets_peeling_refs(err, expr, expr_ty, expected)
- || self.suggest_copied_or_cloned(err, expr, expr_ty, expected)
+ || self.suggest_copied_cloned_or_as_ref(err, expr, expr_ty, expected)
|| self.suggest_clone_for_ref(err, expr, expr_ty, expected)
|| self.suggest_into(err, expr, expr_ty, expected)
|| self.suggest_floating_point_literal(err, expr, expected)
@@ -84,6 +84,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.annotate_expected_due_to_let_ty(err, expr, error);
+ // FIXME(#73154): For now, we do leak check when coercing function
+ // pointers in typeck, instead of only during borrowck. This can lead
+ // to these `RegionsInsufficientlyPolymorphic` errors that aren't helpful.
+ if matches!(error, Some(TypeError::RegionsInsufficientlyPolymorphic(..))) {
+ return;
+ }
+
if self.is_destruct_assignment_desugaring(expr) {
return;
}
@@ -102,7 +109,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
found_expr: &mut &'tcx hir::Expr<'tcx>,
expected_expr: &mut Option<&'tcx hir::Expr<'tcx>>,
) {
- let Some(expected_expr) = expected_expr else { return; };
+ let Some(expected_expr) = expected_expr else {
+ return;
+ };
if !found_expr.span.eq_ctxt(expected_expr.span) {
return;
@@ -121,11 +130,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let hir::ExprKind::Unary(
hir::UnOp::Deref,
hir::Expr { kind: hir::ExprKind::Path(found_path), .. },
- ) = found_expr.kind else { return; };
+ ) = found_expr.kind
+ else {
+ return;
+ };
let hir::ExprKind::Unary(
hir::UnOp::Deref,
hir::Expr { kind: hir::ExprKind::Path(expected_path), .. },
- ) = expected_expr.kind else { return; };
+ ) = expected_expr.kind
+ else {
+ return;
+ };
for (path, name, idx, var) in [
(expected_path, "left_val", 0, expected_expr),
@@ -239,7 +254,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
) -> (Ty<'tcx>, Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>>) {
let expected = self.resolve_vars_with_obligations(expected);
- let e = match self.try_coerce(expr, checked_ty, expected, allow_two_phase, None) {
+ let e = match self.coerce(expr, checked_ty, expected, allow_two_phase, None) {
Ok(ty) => return (ty, None),
Err(e) => e,
};
@@ -252,25 +267,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
));
let expr = expr.peel_drop_temps();
let cause = self.misc(expr.span);
- let expr_ty = self.resolve_vars_with_obligations(checked_ty);
+ let expr_ty = self.resolve_vars_if_possible(checked_ty);
let mut err = self.err_ctxt().report_mismatched_types(&cause, expected, expr_ty, e);
- let is_insufficiently_polymorphic =
- matches!(e, TypeError::RegionsInsufficientlyPolymorphic(..));
-
- // FIXME(#73154): For now, we do leak check when coercing function
- // pointers in typeck, instead of only during borrowck. This can lead
- // to these `RegionsInsufficientlyPolymorphic` errors that aren't helpful.
- if !is_insufficiently_polymorphic {
- self.emit_coerce_suggestions(
- &mut err,
- expr,
- expr_ty,
- expected,
- expected_ty_expr,
- Some(e),
- );
- }
+ self.emit_coerce_suggestions(&mut err, expr, expr_ty, expected, expected_ty_expr, Some(e));
(expected, Some(err))
}
@@ -285,16 +285,26 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
) -> bool {
let hir = self.tcx.hir();
- let hir::ExprKind::Path(hir::QPath::Resolved(None, p)) = expr.kind else { return false; };
- let [hir::PathSegment { ident, args: None, .. }] = p.segments else { return false; };
- let hir::def::Res::Local(local_hir_id) = p.res else { return false; };
- let hir::Node::Pat(pat) = hir.get(local_hir_id) else { return false; };
+ let hir::ExprKind::Path(hir::QPath::Resolved(None, p)) = expr.kind else {
+ return false;
+ };
+ let [hir::PathSegment { ident, args: None, .. }] = p.segments else {
+ return false;
+ };
+ let hir::def::Res::Local(local_hir_id) = p.res else {
+ return false;
+ };
+ let hir::Node::Pat(pat) = hir.get(local_hir_id) else {
+ return false;
+ };
let (init_ty_hir_id, init) = match hir.get_parent(pat.hir_id) {
hir::Node::Local(hir::Local { ty: Some(ty), init, .. }) => (ty.hir_id, *init),
hir::Node::Local(hir::Local { init: Some(init), .. }) => (init.hir_id, Some(*init)),
_ => return false,
};
- let Some(init_ty) = self.node_ty_opt(init_ty_hir_id) else { return false; };
+ let Some(init_ty) = self.node_ty_opt(init_ty_hir_id) else {
+ return false;
+ };
// Locate all the usages of the relevant binding.
struct FindExprs<'tcx> {
@@ -413,14 +423,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Bindings always update their recorded type after the fact, so we
// need to look at the *following* usage's type to see when the
// binding became incompatible.
- let [binding, next_usage] = *window else { continue; };
+ let [binding, next_usage] = *window else {
+ continue;
+ };
// Don't go past the binding (always gonna be a nonsense label if so)
if binding.hir_id == expr.hir_id {
break;
}
- let Some(next_use_ty) = self.node_ty_opt(next_usage.hir_id) else { continue; };
+ let Some(next_use_ty) = self.node_ty_opt(next_usage.hir_id) else {
+ continue;
+ };
// If the type is not constrained in a way making it not possible to
// equate with `expected_ty` by this point, skip.
@@ -461,7 +475,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
{
let Some(arg_ty) = self.node_ty_opt(arg_expr.hir_id) else { continue; };
let arg_ty = arg_ty.fold_with(&mut fudger);
- let _ = self.try_coerce(
+ let _ = self.coerce(
arg_expr,
arg_ty,
*expected_arg_ty,
@@ -599,7 +613,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// is in a different line, so we point at both.
err.span_label(secondary_span, "expected due to the type of this binding");
err.span_label(primary_span, format!("expected due to this{post_message}"));
- } else if post_message == "" {
+ } else if post_message.is_empty() {
// We are pointing at either the assignment lhs or the binding def pattern.
err.span_label(primary_span, "expected due to the type of this binding");
} else {
@@ -634,27 +648,36 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
error: Option<TypeError<'tcx>>,
) {
let parent = self.tcx.hir().parent_id(expr.hir_id);
- let Some(TypeError::Sorts(ExpectedFound { expected, .. })) = error else {return;};
- let Some(hir::Node::Expr(hir::Expr {
- kind: hir::ExprKind::Assign(lhs, rhs, _), ..
- })) = self.tcx.hir().find(parent) else {return; };
+ let Some(TypeError::Sorts(ExpectedFound { expected, .. })) = error else {
+ return;
+ };
+ let Some(hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Assign(lhs, rhs, _), .. })) =
+ self.tcx.hir().find(parent)
+ else {
+ return;
+ };
if rhs.hir_id != expr.hir_id || expected.is_closure() {
return;
}
- let hir::ExprKind::Unary(hir::UnOp::Deref, deref) = lhs.kind else { return; };
- let hir::ExprKind::MethodCall(path, base, args, _) = deref.kind else { return; };
- let Some(self_ty) = self.typeck_results.borrow().expr_ty_adjusted_opt(base) else { return; };
+ let hir::ExprKind::Unary(hir::UnOp::Deref, deref) = lhs.kind else {
+ return;
+ };
+ let hir::ExprKind::MethodCall(path, base, args, _) = deref.kind else {
+ return;
+ };
+ let Some(self_ty) = self.typeck_results.borrow().expr_ty_adjusted_opt(base) else {
+ return;
+ };
- let Ok(pick) = self
- .lookup_probe_for_diagnostic(
- path.ident,
- self_ty,
- deref,
- probe::ProbeScope::TraitsInScope,
- None,
- ) else {
- return;
- };
+ let Ok(pick) = self.lookup_probe_for_diagnostic(
+ path.ident,
+ self_ty,
+ deref,
+ probe::ProbeScope::TraitsInScope,
+ None,
+ ) else {
+ return;
+ };
let in_scope_methods = self.probe_for_name_many(
probe::Mode::MethodCall,
path.ident,
@@ -681,7 +704,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.filter(|c| c.item.def_id != pick.item.def_id)
.map(|c| {
let m = c.item;
- let substs = ty::InternalSubsts::for_item(self.tcx, m.def_id, |param, _| {
+ let generic_args = ty::GenericArgs::for_item(self.tcx, m.def_id, |param, _| {
self.var_for_def(deref.span, param)
});
let mutability =
@@ -696,7 +719,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
format!(
"{}({}",
with_no_trimmed_paths!(
- self.tcx.def_path_str_with_substs(m.def_id, substs,)
+ self.tcx.def_path_str_with_args(m.def_id, generic_args,)
),
mutability,
),
@@ -793,8 +816,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expected: Ty<'tcx>,
found: Ty<'tcx>,
) -> bool {
- let ty::Adt(e, substs_e) = expected.kind() else { return false; };
- let ty::Adt(f, substs_f) = found.kind() else { return false; };
+ let ty::Adt(e, args_e) = expected.kind() else {
+ return false;
+ };
+ let ty::Adt(f, args_f) = found.kind() else {
+ return false;
+ };
if e.did() != f.did() {
return false;
}
@@ -811,8 +838,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else {
return false;
}
- let e = substs_e.type_at(1);
- let f = substs_f.type_at(1);
+ let e = args_e.type_at(1);
+ let f = args_f.type_at(1);
if self
.infcx
.type_implements_trait(
@@ -845,7 +872,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expected: Ty<'tcx>,
expr_ty: Ty<'tcx>,
) -> bool {
- if let ty::Adt(expected_adt, substs) = expected.kind() {
+ if in_external_macro(self.tcx.sess, expr.span) {
+ return false;
+ }
+ if let ty::Adt(expected_adt, args) = expected.kind() {
if let hir::ExprKind::Field(base, ident) = expr.kind {
let base_ty = self.typeck_results.borrow().expr_ty(base);
if self.can_eq(self.param_env, base_ty, expected)
@@ -944,7 +974,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let note_about_variant_field_privacy = (field_is_local && !field_is_accessible)
.then(|| " (its field is private, but it's local to this crate and its privacy can be changed)".to_string());
- let sole_field_ty = sole_field.ty(self.tcx, substs);
+ let sole_field_ty = sole_field.ty(self.tcx, args);
if self.can_coerce(expr_ty, sole_field_ty) {
let variant_path =
with_no_trimmed_paths!(self.tcx.def_path_str(variant.def_id));
@@ -962,7 +992,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.collect();
let suggestions_for = |variant: &_, ctor_kind, field_name| {
- let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) {
+ let prefix = match self.tcx.hir().maybe_get_struct_pattern_shorthand_field(expr) {
Some(ident) => format!("{ident}: "),
None => String::new(),
};
@@ -1037,9 +1067,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let tcx = self.tcx;
let (adt, unwrap) = match expected.kind() {
// In case Option<NonZero*> is wanted, but * is provided, suggest calling new
- ty::Adt(adt, substs) if tcx.is_diagnostic_item(sym::Option, adt.did()) => {
+ ty::Adt(adt, args) if tcx.is_diagnostic_item(sym::Option, adt.did()) => {
// Unwrap option
- let ty::Adt(adt, _) = substs.type_at(0).kind() else { return false; };
+ let ty::Adt(adt, _) = args.type_at(0).kind() else {
+ return false;
+ };
(adt, "")
}
@@ -1061,10 +1093,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
(sym::NonZeroI128, tcx.types.i128),
];
- let Some((s, _)) = map
- .iter()
- .find(|&&(s, t)| self.tcx.is_diagnostic_item(s, adt.did()) && self.can_coerce(expr_ty, t))
- else { return false; };
+ let Some((s, _)) = map.iter().find(|&&(s, t)| {
+ self.tcx.is_diagnostic_item(s, adt.did()) && self.can_coerce(expr_ty, t)
+ }) else {
+ return false;
+ };
let path = self.tcx.def_path_str(adt.non_enum_variant().def_id);
@@ -1152,7 +1185,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let local_parent = self.tcx.hir().parent_id(local_id);
- let Some(Node::Param(hir::Param { hir_id: param_hir_id, .. })) = self.tcx.hir().find(local_parent) else {
+ let Some(Node::Param(hir::Param { hir_id: param_hir_id, .. })) =
+ self.tcx.hir().find(local_parent)
+ else {
return None;
};
@@ -1161,7 +1196,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
hir_id: expr_hir_id,
kind: hir::ExprKind::Closure(hir::Closure { fn_decl: closure_fn_decl, .. }),
..
- })) = self.tcx.hir().find(param_parent) else {
+ })) = self.tcx.hir().find(param_parent)
+ else {
return None;
};
@@ -1174,7 +1210,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
..
})),
1,
- ) = (hir, closure_params_len) else {
+ ) = (hir, closure_params_len)
+ else {
return None;
};
@@ -1198,39 +1235,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
- pub(crate) fn maybe_get_struct_pattern_shorthand_field(
- &self,
- expr: &hir::Expr<'_>,
- ) -> Option<Symbol> {
- let hir = self.tcx.hir();
- let local = match expr {
- hir::Expr {
- kind:
- hir::ExprKind::Path(hir::QPath::Resolved(
- None,
- hir::Path {
- res: hir::def::Res::Local(_),
- segments: [hir::PathSegment { ident, .. }],
- ..
- },
- )),
- ..
- } => Some(ident),
- _ => None,
- }?;
-
- match hir.find_parent(expr.hir_id)? {
- Node::ExprField(field) => {
- if field.ident.name == local.name && field.is_shorthand {
- return Some(local.name);
- }
- }
- _ => {}
- }
-
- None
- }
-
/// If the given `HirId` corresponds to a block with a trailing expression, return that expression
pub(crate) fn maybe_get_block_expr(
&self,
@@ -1425,7 +1429,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
));
}
- let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) {
+ let prefix = match self.tcx.hir().maybe_get_struct_pattern_shorthand_field(expr) {
Some(ident) => format!("{ident}: "),
None => String::new(),
};
@@ -1619,7 +1623,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
)
};
- let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) {
+ let prefix = match self.tcx.hir().maybe_get_struct_pattern_shorthand_field(expr) {
Some(ident) => format!("{ident}: "),
None => String::new(),
};
@@ -2028,11 +2032,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if !hir::is_range_literal(expr) {
return;
}
- let hir::ExprKind::Struct(
- hir::QPath::LangItem(LangItem::Range, ..),
- [start, end],
- _,
- ) = expr.kind else { return; };
+ let hir::ExprKind::Struct(hir::QPath::LangItem(LangItem::Range, ..), [start, end], _) =
+ expr.kind
+ else {
+ return;
+ };
let parent = self.tcx.hir().parent_id(expr.hir_id);
if let Some(hir::Node::ExprField(_)) = self.tcx.hir().find(parent) {
// Ignore `Foo { field: a..Default::default() }`
@@ -2048,8 +2052,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// cannot guide the method probe.
expectation = None;
}
- let hir::ExprKind::Call(method_name, _) = expr.kind else { return; };
- let ty::Adt(adt, _) = checked_ty.kind() else { return; };
+ let hir::ExprKind::Call(method_name, _) = expr.kind else {
+ return;
+ };
+ let ty::Adt(adt, _) = checked_ty.kind() else {
+ return;
+ };
if self.tcx.lang_items().range_struct() != Some(adt.did()) {
return;
}
@@ -2059,8 +2067,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return;
}
// Check if start has method named end.
- let hir::ExprKind::Path(hir::QPath::Resolved(None, p)) = method_name.kind else { return; };
- let [hir::PathSegment { ident, .. }] = p.segments else { return; };
+ let hir::ExprKind::Path(hir::QPath::Resolved(None, p)) = method_name.kind else {
+ return;
+ };
+ let [hir::PathSegment { ident, .. }] = p.segments else {
+ return;
+ };
let self_ty = self.typeck_results.borrow().expr_ty(start.expr);
let Ok(_pick) = self.lookup_probe_for_diagnostic(
*ident,
@@ -2068,7 +2080,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expr,
probe::ProbeScope::AllTraits,
expectation,
- ) else { return; };
+ ) else {
+ return;
+ };
let mut sugg = ".";
let mut span = start.expr.span.between(end.expr.span);
if span.lo() + BytePos(2) == span.hi() {
@@ -2097,17 +2111,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if !checked_ty.is_unit() {
return;
}
- let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = expr.kind else { return; };
- let hir::def::Res::Local(hir_id) = path.res else { return; };
+ let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = expr.kind else {
+ return;
+ };
+ let hir::def::Res::Local(hir_id) = path.res else {
+ return;
+ };
let Some(hir::Node::Pat(pat)) = self.tcx.hir().find(hir_id) else {
return;
};
- let Some(hir::Node::Local(hir::Local {
- ty: None,
- init: Some(init),
- ..
- })) = self.tcx.hir().find_parent(pat.hir_id) else { return; };
- let hir::ExprKind::Block(block, None) = init.kind else { return; };
+ let Some(hir::Node::Local(hir::Local { ty: None, init: Some(init), .. })) =
+ self.tcx.hir().find_parent(pat.hir_id)
+ else {
+ return;
+ };
+ let hir::ExprKind::Block(block, None) = init.kind else {
+ return;
+ };
if block.expr.is_some() {
return;
}
@@ -2115,8 +2135,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.span_label(block.span, "this empty block is missing a tail expression");
return;
};
- let hir::StmtKind::Semi(tail_expr) = stmt.kind else { return; };
- let Some(ty) = self.node_ty_opt(tail_expr.hir_id) else { return; };
+ let hir::StmtKind::Semi(tail_expr) = stmt.kind else {
+ return;
+ };
+ let Some(ty) = self.node_ty_opt(tail_expr.hir_id) else {
+ return;
+ };
if self.can_eq(self.param_env, expected_ty, ty) {
err.span_suggestion_short(
stmt.span.with_lo(tail_expr.span.hi()),
@@ -2135,7 +2159,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expr: &hir::Expr<'_>,
checked_ty: Ty<'tcx>,
) {
- let Some(hir::Node::Expr(parent_expr)) = self.tcx.hir().find_parent(expr.hir_id) else { return; };
+ let Some(hir::Node::Expr(parent_expr)) = self.tcx.hir().find_parent(expr.hir_id) else {
+ return;
+ };
enum CallableKind {
Function,
Method,
@@ -2151,7 +2177,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return;
}
let fn_sig = fn_ty.fn_sig(self.tcx).skip_binder();
- let Some(&arg) = fn_sig.inputs().get(arg_idx + if matches!(kind, CallableKind::Method) { 1 } else { 0 }) else { return; };
+ let Some(&arg) = fn_sig
+ .inputs()
+ .get(arg_idx + if matches!(kind, CallableKind::Method) { 1 } else { 0 })
+ else {
+ return;
+ };
if matches!(arg.kind(), ty::Param(_))
&& fn_sig.output().contains(arg)
&& self.node_ty(args[arg_idx].hir_id) == checked_ty
@@ -2185,8 +2216,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
match parent_expr.kind {
hir::ExprKind::Call(fun, args) => {
- let hir::ExprKind::Path(hir::QPath::Resolved(_, path)) = fun.kind else { return; };
- let hir::def::Res::Def(kind, def_id) = path.res else { return; };
+ let hir::ExprKind::Path(hir::QPath::Resolved(_, path)) = fun.kind else {
+ return;
+ };
+ let hir::def::Res::Def(kind, def_id) = path.res else {
+ return;
+ };
let callable_kind = if matches!(kind, hir::def::DefKind::Ctor(_, _)) {
CallableKind::Constructor
} else {
@@ -2195,7 +2230,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
maybe_emit_help(def_id, path.segments[0].ident, args, callable_kind);
}
hir::ExprKind::MethodCall(method, _receiver, args, _span) => {
- let Some(def_id) = self.typeck_results.borrow().type_dependent_def_id(parent_expr.hir_id) else { return; };
+ let Some(def_id) =
+ self.typeck_results.borrow().type_dependent_def_id(parent_expr.hir_id)
+ else {
+ return;
+ };
maybe_emit_help(def_id, method.ident, args, CallableKind::Method)
}
_ => return,
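Much of the remaining churn in demand.rs above is a pure reflow of single-line `let ... else { return; };` bindings into the multi-line form rustfmt adopted for let-else; the control flow is unchanged. A tiny standalone illustration of the two layouts (the `parse_port` function is made up for the example):

fn parse_port(input: &str) -> Option<u16> {
    // Before: let Some(rest) = input.strip_prefix("port=") else { return None; };
    // After, as in the hunks above:
    let Some(rest) = input.strip_prefix("port=") else {
        return None;
    };
    rest.parse().ok()
}

fn main() {
    assert_eq!(parse_port("port=8080"), Some(8080));
    assert_eq!(parse_port("host=example"), None);
}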
diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs
index 05906a4b9..054d23c71 100644
--- a/compiler/rustc_hir_typeck/src/errors.rs
+++ b/compiler/rustc_hir_typeck/src/errors.rs
@@ -252,6 +252,46 @@ impl HelpUseLatestEdition {
}
}
+#[derive(Subdiagnostic)]
+pub enum OptionResultRefMismatch {
+ #[suggestion(
+ hir_typeck_option_result_copied,
+ code = ".copied()",
+ style = "verbose",
+ applicability = "machine-applicable"
+ )]
+ Copied {
+ #[primary_span]
+ span: Span,
+ def_path: String,
+ },
+ #[suggestion(
+ hir_typeck_option_result_cloned,
+ code = ".cloned()",
+ style = "verbose",
+ applicability = "machine-applicable"
+ )]
+ Cloned {
+ #[primary_span]
+ span: Span,
+ def_path: String,
+ },
+ // FIXME: #114050
+ // #[suggestion(
+ // hir_typeck_option_result_asref,
+ // code = ".as_ref()",
+ // style = "verbose",
+ // applicability = "machine-applicable"
+ // )]
+ // AsRef {
+ // #[primary_span]
+ // span: Span,
+ // def_path: String,
+ // expected_ty: Ty<'tcx>,
+ // expr_ty: Ty<'tcx>,
+ // },
+}
+
#[derive(Diagnostic)]
#[diag(hir_typeck_const_select_must_be_const)]
#[help]
diff --git a/compiler/rustc_hir_typeck/src/expectation.rs b/compiler/rustc_hir_typeck/src/expectation.rs
index 4f086cf59..35e5fb769 100644
--- a/compiler/rustc_hir_typeck/src/expectation.rs
+++ b/compiler/rustc_hir_typeck/src/expectation.rs
@@ -21,8 +21,6 @@ pub enum Expectation<'tcx> {
/// This rvalue expression will be wrapped in `&` or `Box` and coerced
/// to `&Ty` or `Box<Ty>`, respectively. `Ty` is `[A]` or `Trait`.
ExpectRvalueLikeUnsized(Ty<'tcx>),
-
- IsLast(Span),
}
impl<'a, 'tcx> Expectation<'tcx> {
@@ -88,13 +86,12 @@ impl<'a, 'tcx> Expectation<'tcx> {
ExpectCastableToType(t) => ExpectCastableToType(fcx.resolve_vars_if_possible(t)),
ExpectHasType(t) => ExpectHasType(fcx.resolve_vars_if_possible(t)),
ExpectRvalueLikeUnsized(t) => ExpectRvalueLikeUnsized(fcx.resolve_vars_if_possible(t)),
- IsLast(sp) => IsLast(sp),
}
}
pub(super) fn to_option(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
match self.resolve(fcx) {
- NoExpectation | IsLast(_) => None,
+ NoExpectation => None,
ExpectCastableToType(ty) | ExpectHasType(ty) | ExpectRvalueLikeUnsized(ty) => Some(ty),
}
}
@@ -106,9 +103,7 @@ impl<'a, 'tcx> Expectation<'tcx> {
pub(super) fn only_has_type(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
match self {
ExpectHasType(ty) => Some(fcx.resolve_vars_if_possible(ty)),
- NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) | IsLast(_) => {
- None
- }
+ NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) => None,
}
}
diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs
index 72b29f7b6..7cea40fdd 100644
--- a/compiler/rustc_hir_typeck/src/expr.rs
+++ b/compiler/rustc_hir_typeck/src/expr.rs
@@ -13,7 +13,7 @@ use crate::errors::{
YieldExprOutsideOfGenerator,
};
use crate::fatally_break_rust;
-use crate::method::SelfSource;
+use crate::method::{MethodCallComponents, SelfSource};
use crate::type_error_struct;
use crate::Expectation::{self, ExpectCastableToType, ExpectHasType, NoExpectation};
use crate::{
@@ -44,7 +44,7 @@ use rustc_infer::traits::ObligationCause;
use rustc_middle::middle::stability;
use rustc_middle::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase};
use rustc_middle::ty::error::TypeError::FieldMisMatch;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, AdtKind, Ty, TypeVisitableExt};
use rustc_session::errors::ExprParenthesesNeeded;
use rustc_session::parse::feature_err;
@@ -60,28 +60,13 @@ use rustc_trait_selection::traits::ObligationCtxt;
use rustc_trait_selection::traits::{self, ObligationCauseCode};
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- fn check_expr_eq_type(&self, expr: &'tcx hir::Expr<'tcx>, expected: Ty<'tcx>) {
- let ty = self.check_expr_with_hint(expr, expected);
- self.demand_eqtype(expr.span, expected, ty);
- }
-
pub fn check_expr_has_type_or_error(
&self,
expr: &'tcx hir::Expr<'tcx>,
- expected: Ty<'tcx>,
- extend_err: impl FnMut(&mut Diagnostic),
- ) -> Ty<'tcx> {
- self.check_expr_meets_expectation_or_error(expr, ExpectHasType(expected), extend_err)
- }
-
- fn check_expr_meets_expectation_or_error(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- mut extend_err: impl FnMut(&mut Diagnostic),
+ expected_ty: Ty<'tcx>,
+ extend_err: impl FnOnce(&mut Diagnostic),
) -> Ty<'tcx> {
- let expected_ty = expected.to_option(&self).unwrap_or(self.tcx.types.bool);
- let mut ty = self.check_expr_with_expectation(expr, expected);
+ let mut ty = self.check_expr_with_expectation(expr, ExpectHasType(expected_ty));
// While we don't allow *arbitrary* coercions here, we *do* allow
// coercions from ! to `expected`.
@@ -341,9 +326,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
ExprKind::Cast(e, t) => self.check_expr_cast(e, t, expr),
ExprKind::Type(e, t) => {
- let ty = self.to_ty_saving_user_provided_ty(&t);
- self.check_expr_eq_type(&e, ty);
- ty
+ let ascribed_ty = self.to_ty_saving_user_provided_ty(&t);
+ let ty = self.check_expr_with_hint(e, ascribed_ty);
+ self.demand_eqtype(e.span, ascribed_ty, ty);
+ ascribed_ty
}
ExprKind::If(cond, then_expr, opt_else_expr) => {
self.check_then_else(cond, then_expr, opt_else_expr, expr.span, expected)
@@ -359,7 +345,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.check_expr_struct(expr, expected, qpath, fields, base_expr)
}
ExprKind::Field(base, field) => self.check_field(expr, &base, field, expected),
- ExprKind::Index(base, idx) => self.check_expr_index(base, idx, expr),
+ ExprKind::Index(base, idx, brackets_span) => {
+ self.check_expr_index(base, idx, expr, brackets_span)
+ }
ExprKind::Yield(value, ref src) => self.check_expr_yield(value, expr, src),
hir::ExprKind::Err(guar) => Ty::new_error(tcx, guar),
}
@@ -593,8 +581,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// We always require that the type provided as the value for
// a type parameter outlives the moment of instantiation.
- let substs = self.typeck_results.borrow().node_substs(expr.hir_id);
- self.add_wf_bounds(substs, expr);
+ let args = self.typeck_results.borrow().node_args(expr.hir_id);
+ self.add_wf_bounds(args, expr);
ty
}
@@ -650,7 +638,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
let Some(ctxt) = enclosing_breakables.opt_find_breakable(target_id) else {
// Avoid ICE when `break` is inside a closure (#65383).
- return Ty::new_error_with_message(tcx,
+ return Ty::new_error_with_message(
+ tcx,
expr.span,
"break was outside loop, but no error was emitted",
);
@@ -665,7 +654,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
coerce.coerce_forced_unit(
self,
&cause,
- &mut |mut err| {
+ |mut err| {
self.suggest_mismatched_types_on_tail(
&mut err, expr, ty, e_ty, target_id,
);
@@ -761,7 +750,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
coercion.coerce_forced_unit(
self,
&cause,
- &mut |db| {
+ |db| {
let span = fn_decl.output.span();
if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
db.span_label(
@@ -773,7 +762,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
true,
);
} else {
- coercion.coerce_forced_unit(self, &cause, &mut |_| (), true);
+ coercion.coerce_forced_unit(self, &cause, |_| (), true);
}
}
self.tcx.types.never
@@ -1280,7 +1269,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// We could add a "consider `foo::<params>`" suggestion here, but I wasn't able to
// trigger this codepath causing `structurally_resolve_type` to emit an error.
- self.enforce_context_effects(expr.hir_id, expr.span, method.def_id, method.substs);
+ self.enforce_context_effects(expr.hir_id, expr.span, method.def_id, method.args);
self.write_method_call(expr.hir_id, method);
Ok(method)
}
@@ -1292,7 +1281,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
segment.ident,
SelfSource::MethodCall(rcvr),
error,
- Some((rcvr, args)),
+ Some(MethodCallComponents { receiver: rcvr, args, full_expr: expr }),
expected,
false,
) {
@@ -1333,7 +1322,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
t_cast,
t.span,
expr.span,
- self.param_env.constness(),
+ hir::Constness::NotConst,
) {
Ok(cast_check) => {
debug!(
@@ -1390,11 +1379,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let parent_node = self.tcx.hir().parent_iter(expr.hir_id).find(|(_, node)| {
!matches!(node, hir::Node::Expr(hir::Expr { kind: hir::ExprKind::AddrOf(..), .. }))
});
- let Some((_,
+ let Some((
+ _,
hir::Node::Local(hir::Local { ty: Some(ty), .. })
- | hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(ty, _), .. }))
- ) = parent_node else {
- return
+ | hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(ty, _, _), .. }),
+ )) = parent_node
+ else {
+ return;
};
if let hir::TyKind::Array(_, length) = ty.peel_refs().kind
&& let hir::ArrayLen::Body(hir::AnonConst { hir_id, .. }) = length
@@ -1425,7 +1416,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Create a new function context.
let def_id = block.def_id;
- let fcx = FnCtxt::new(self, self.param_env.with_const(), def_id);
+ let fcx = FnCtxt::new(self, self.param_env, def_id);
crate::GatherLocalsVisitor::new(&fcx).visit_body(body);
let ty = fcx.check_expr_with_expectation(&body.value, expected);
@@ -1617,7 +1608,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// re-link the regions that EIfEO can erase.
self.demand_eqtype(span, adt_ty_hint, adt_ty);
- let ty::Adt(adt, substs) = adt_ty.kind() else {
+ let ty::Adt(adt, args) = adt_ty.kind() else {
span_bug!(span, "non-ADT passed to check_expr_struct_fields");
};
let adt_kind = adt.adt_kind();
@@ -1646,7 +1637,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
tcx.check_stability(v_field.did, Some(expr_id), field.span, None);
}
- self.field_ty(field.span, v_field, substs)
+ self.field_ty(field.span, v_field, args)
} else {
error_happened = true;
let guar = if let Some(prev_span) = seen_fields.get(&ident) {
@@ -1678,7 +1669,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if let Some(mut diag) = diag {
if idx == ast_fields.len() - 1 {
if remaining_fields.is_empty() {
- self.suggest_fru_from_range(field, variant, substs, &mut diag);
+ self.suggest_fru_from_range(field, variant, args, &mut diag);
diag.emit();
} else {
diag.stash(field.span, StashKey::MaybeFruTypo);
@@ -1718,7 +1709,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let fru_tys = if self.tcx.features().type_changing_struct_update {
if adt.is_struct() {
// Make some fresh substitutions for our ADT type.
- let fresh_substs = self.fresh_substs_for_item(base_expr.span, adt.did());
+ let fresh_args = self.fresh_args_for_item(base_expr.span, adt.did());
// We do subtyping on the FRU fields first, so we can
// learn exactly what types we expect the base expr
// needs constrained to be compatible with the struct
@@ -1727,13 +1718,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.fields
.iter()
.map(|f| {
- let fru_ty = self.normalize(
- expr_span,
- self.field_ty(base_expr.span, f, fresh_substs),
- );
+ let fru_ty = self
+ .normalize(expr_span, self.field_ty(base_expr.span, f, fresh_args));
let ident = self.tcx.adjust_ident(f.ident(self.tcx), variant.def_id);
if let Some(_) = remaining_fields.remove(&ident) {
- let target_ty = self.field_ty(base_expr.span, f, substs);
+ let target_ty = self.field_ty(base_expr.span, f, args);
let cause = self.misc(base_expr.span);
match self.at(&cause, self.param_env).sup(
DefineOpaqueTypes::No,
@@ -1760,7 +1749,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.resolve_vars_if_possible(fru_ty)
})
.collect();
- // The use of fresh substs that we have subtyped against
+ // The use of fresh args that we have subtyped against
// our base ADT type's fields allows us to guide inference
// along so that, e.g.
// ```
@@ -1778,7 +1767,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// `MyStruct<'a, _, F2, C>`, as opposed to just `_`...
// This is important to allow coercions to happen in
// `other_struct` itself. See `coerce-in-base-expr.rs`.
- let fresh_base_ty = Ty::new_adt(self.tcx, *adt, fresh_substs);
+ let fresh_base_ty = Ty::new_adt(self.tcx, *adt, fresh_args);
self.check_expr_has_type_or_error(
base_expr,
self.resolve_vars_if_possible(fresh_base_ty),
@@ -1810,10 +1799,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
});
match adt_ty.kind() {
- ty::Adt(adt, substs) if adt.is_struct() => variant
+ ty::Adt(adt, args) if adt.is_struct() => variant
.fields
.iter()
- .map(|f| self.normalize(expr_span, f.ty(self.tcx, substs)))
+ .map(|f| self.normalize(expr_span, f.ty(self.tcx, args)))
.collect(),
_ => {
self.tcx
@@ -1841,7 +1830,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
remaining_fields,
variant,
ast_fields,
- substs,
+ args,
);
}
}
@@ -1878,7 +1867,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
remaining_fields: FxHashMap<Ident, (FieldIdx, &ty::FieldDef)>,
variant: &'tcx ty::VariantDef,
ast_fields: &'tcx [hir::ExprField<'tcx>],
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) {
let len = remaining_fields.len();
@@ -1889,7 +1878,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut truncated_fields_error = String::new();
let remaining_fields_names = match &displayable_field_names[..] {
- [field1] => format!("`{}`", field1),
+ [field1] => format!("`{field1}`"),
[field1, field2] => format!("`{field1}` and `{field2}`"),
[field1, field2, field3] => format!("`{field1}`, `{field2}` and `{field3}`"),
_ => {
@@ -1917,7 +1906,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.span_label(span, format!("missing {remaining_fields_names}{truncated_fields_error}"));
if let Some(last) = ast_fields.last() {
- self.suggest_fru_from_range(last, variant, substs, &mut err);
+ self.suggest_fru_from_range(last, variant, args, &mut err);
}
err.emit();
@@ -1929,7 +1918,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
last_expr_field: &hir::ExprField<'tcx>,
variant: &ty::VariantDef,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
err: &mut Diagnostic,
) {
// I don't use 'is_range_literal' because only double-sided, half-open ranges count.
@@ -1942,7 +1931,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
variant.fields.iter().find(|field| field.ident(self.tcx) == last_expr_field.ident)
&& let range_def_id = self.tcx.lang_items().range_struct()
&& variant_field
- .and_then(|field| field.ty(self.tcx, substs).ty_adt_def())
+ .and_then(|field| field.ty(self.tcx, args).ty_adt_def())
.map(|adt| adt.did())
!= range_def_id
{
@@ -2116,16 +2105,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
_ => {
- err.span_label(variant_ident_span, format!("`{adt}` defined here", adt = ty));
+ err.span_label(variant_ident_span, format!("`{ty}` defined here"));
err.span_label(field.ident.span, "field does not exist");
err.span_suggestion_verbose(
expr_span,
- format!(
- "`{adt}` is a tuple {kind_name}, use the appropriate syntax",
- adt = ty,
- kind_name = kind_name,
- ),
- format!("{adt}(/* fields */)", adt = ty),
+ format!("`{ty}` is a tuple {kind_name}, use the appropriate syntax",),
+ format!("{ty}(/* fields */)"),
Applicability::HasPlaceholders,
);
}
@@ -2242,7 +2227,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// dynamic limit, to never omit just one field
let limit = if names.len() == 6 { 6 } else { 5 };
let mut display =
- names.iter().take(limit).map(|n| format!("`{}`", n)).collect::<Vec<_>>().join(", ");
+ names.iter().take(limit).map(|n| format!("`{n}`")).collect::<Vec<_>>().join(", ");
if names.len() > limit {
display = format!("{} ... and {} others", display, names.len() - limit);
}
@@ -2265,7 +2250,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
while let Some((deref_base_ty, _)) = autoderef.next() {
debug!("deref_base_ty: {:?}", deref_base_ty);
match deref_base_ty.kind() {
- ty::Adt(base_def, substs) if !base_def.is_enum() => {
+ ty::Adt(base_def, args) if !base_def.is_enum() => {
debug!("struct named {:?}", deref_base_ty);
let body_hir_id = self.tcx.hir().local_def_id_to_hir_id(self.body_id);
let (ident, def_scope) =
@@ -2275,7 +2260,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.iter_enumerated()
.find(|(_, f)| f.ident(self.tcx).normalize_to_macros_2_0() == ident)
{
- let field_ty = self.field_ty(expr.span, field, substs);
+ let field_ty = self.field_ty(expr.span, field, args);
// Save the index of all fields regardless of their visibility in case
// of error recovery.
self.write_field_index(expr.hir_id, index);
@@ -2416,7 +2401,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
base: &'tcx hir::Expr<'tcx>,
ty: Ty<'tcx>,
) {
- let Some(output_ty) = self.get_impl_future_output_ty(ty) else { return; };
+ let Some(output_ty) = self.get_impl_future_output_ty(ty) else {
+ return;
+ };
let mut add_label = true;
if let ty::Adt(def, _) = output_ty.kind() {
// no field access on enum type
@@ -2711,7 +2698,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// try to add a suggestion in case the field is a nested field of a field of the Adt
let mod_id = self.tcx.parent_module(id).to_def_id();
- if let Some((fields, substs)) =
+ if let Some((fields, args)) =
self.get_field_candidates_considering_privacy(span, expr_t, mod_id)
{
let candidate_fields: Vec<_> = fields
@@ -2720,7 +2707,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
span,
&|candidate_field, _| candidate_field.ident(self.tcx()) == field,
candidate_field,
- substs,
+ args,
vec![],
mod_id,
)
@@ -2775,12 +2762,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
span: Span,
base_ty: Ty<'tcx>,
mod_id: DefId,
- ) -> Option<(impl Iterator<Item = &'tcx ty::FieldDef> + 'tcx, SubstsRef<'tcx>)> {
+ ) -> Option<(impl Iterator<Item = &'tcx ty::FieldDef> + 'tcx, GenericArgsRef<'tcx>)> {
debug!("get_field_candidates(span: {:?}, base_t: {:?}", span, base_ty);
for (base_t, _) in self.autoderef(span, base_ty) {
match base_t.kind() {
- ty::Adt(base_def, substs) if !base_def.is_enum() => {
+ ty::Adt(base_def, args) if !base_def.is_enum() => {
let tcx = self.tcx;
let fields = &base_def.non_enum_variant().fields;
// Some struct, e.g. some that impl `Deref`, have all private fields
@@ -2795,7 +2782,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.filter(move |field| field.vis.is_accessible_from(mod_id, tcx))
// For compile-time reasons put a limit on number of fields we search
.take(100),
- substs,
+ args,
));
}
_ => {}
@@ -2811,7 +2798,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
span: Span,
matches: &impl Fn(&ty::FieldDef, Ty<'tcx>) -> bool,
candidate_field: &ty::FieldDef,
- subst: SubstsRef<'tcx>,
+ subst: GenericArgsRef<'tcx>,
mut field_path: Vec<Ident>,
mod_id: DefId,
) -> Option<Vec<Ident>> {
@@ -2855,6 +2842,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
base: &'tcx hir::Expr<'tcx>,
idx: &'tcx hir::Expr<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
+ brackets_span: Span,
) -> Ty<'tcx> {
let base_t = self.check_expr(&base);
let idx_t = self.check_expr(&idx);
@@ -2888,7 +2876,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut err = type_error_struct!(
self.tcx.sess,
- expr.span,
+ brackets_span,
base_t,
E0608,
"cannot index into a value of type `{base_t}`",
@@ -2902,16 +2890,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&& let ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) = lit.node
&& i < types.len().try_into().expect("expected tuple index to be < usize length")
{
- let snip = self.tcx.sess.source_map().span_to_snippet(base.span);
- if let Ok(snip) = snip {
- err.span_suggestion(
- expr.span,
- "to access tuple elements, use",
- format!("{snip}.{i}"),
- Applicability::MachineApplicable,
- );
- needs_note = false;
- }
+
+ err.span_suggestion(
+ brackets_span,
+ "to access tuple elements, use",
+ format!(".{i}"),
+ Applicability::MachineApplicable,
+ );
+ needs_note = false;
} else if let ExprKind::Path(..) = idx.peel_borrows().kind {
err.span_label(idx.span, "cannot access tuple elements at a variable index");
}
@@ -2970,9 +2956,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.commit_if_ok(|_| {
let ocx = ObligationCtxt::new(self);
- let impl_substs = self.fresh_substs_for_item(base_expr.span, impl_def_id);
+ let impl_args = self.fresh_args_for_item(base_expr.span, impl_def_id);
let impl_trait_ref =
- self.tcx.impl_trait_ref(impl_def_id).unwrap().subst(self.tcx, impl_substs);
+ self.tcx.impl_trait_ref(impl_def_id).unwrap().instantiate(self.tcx, impl_args);
let cause = self.misc(base_expr.span);
// Match the impl self type against the base ty. If this fails,
@@ -2989,7 +2975,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty::Binder::dummy(ty::TraitPredicate {
trait_ref: impl_trait_ref,
polarity: ty::ImplPolarity::Positive,
- constness: ty::BoundConstness::NotConst,
}),
|derived| {
traits::ImplDerivedObligation(Box::new(
@@ -3004,7 +2989,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
)
},
self.param_env,
- self.tcx.predicates_of(impl_def_id).instantiate(self.tcx, impl_substs),
+ self.tcx.predicates_of(impl_def_id).instantiate(self.tcx, impl_args),
));
// Normalize the output type, which we can use later on as the
@@ -3012,7 +2997,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let element_ty = ocx.normalize(
&cause,
self.param_env,
- Ty::new_projection(self.tcx, index_trait_output_def_id, impl_trait_ref.substs),
+ Ty::new_projection(self.tcx, index_trait_output_def_id, impl_trait_ref.args),
);
let errors = ocx.select_where_possible();
@@ -3020,7 +3005,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// will still delay a span bug in `report_fulfillment_errors`.
Ok::<_, NoSolution>((
self.err_ctxt().report_fulfillment_errors(&errors),
- impl_trait_ref.substs.type_at(1),
+ impl_trait_ref.args.type_at(1),
element_ty,
))
})
@@ -3152,7 +3137,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let container = self.structurally_resolve_type(expr.span, current_container);
match container.kind() {
- ty::Adt(container_def, substs) if !container_def.is_enum() => {
+ ty::Adt(container_def, args) if !container_def.is_enum() => {
let block = self.tcx.hir().local_def_id_to_hir_id(self.body_id);
let (ident, def_scope) =
self.tcx.adjust_ident_and_get_scope(field, container_def.did(), block);
@@ -3162,7 +3147,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.iter_enumerated()
.find(|(_, f)| f.ident(self.tcx).normalize_to_macros_2_0() == ident)
{
- let field_ty = self.field_ty(expr.span, field, substs);
+ let field_ty = self.field_ty(expr.span, field, args);
// FIXME: DSTs with static alignment should be allowed
self.require_type_is_sized(field_ty, expr.span, traits::MiscObligation);
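The string-formatting hunks in expr.rs above (`format!("`{field1}`")`, `format!("{ty}(/* fields */)")`, and similar) switch to captured identifiers in format strings, stable since Rust 1.58; the output is identical. A trivial standalone check of the equivalence:

fn main() {
    let field1 = "width";
    let old_style = format!("`{}`", field1);
    let new_style = format!("`{field1}`");
    assert_eq!(old_style, new_style);
    println!("{new_style}");
}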
diff --git a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
index 0d2e0602e..840910732 100644
--- a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
+++ b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
@@ -211,7 +211,7 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
self.select_from_expr(base);
}
- hir::ExprKind::Index(lhs, rhs) => {
+ hir::ExprKind::Index(lhs, rhs, _) => {
// lhs[rhs]
self.select_from_expr(lhs);
self.consume_expr(rhs);
@@ -549,7 +549,7 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
// Select just those fields of the `with`
// expression that will actually be used
match with_place.place.ty().kind() {
- ty::Adt(adt, substs) if adt.is_struct() => {
+ ty::Adt(adt, args) if adt.is_struct() => {
// Consume those fields of the with expression that are needed.
for (f_index, with_field) in adt.non_enum_variant().fields.iter_enumerated() {
let is_mentioned = fields
@@ -559,7 +559,7 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
let field_place = self.mc.cat_projection(
&*with_expr,
with_place.clone(),
- with_field.ty(self.tcx(), substs),
+ with_field.ty(self.tcx(), args),
ProjectionKind::Field(f_index, FIRST_VARIANT),
);
self.delegate_consume(&field_place, field_place.hir_id);
diff --git a/compiler/rustc_hir_typeck/src/fallback.rs b/compiler/rustc_hir_typeck/src/fallback.rs
index a76db6e73..5b5986a34 100644
--- a/compiler/rustc_hir_typeck/src/fallback.rs
+++ b/compiler/rustc_hir_typeck/src/fallback.rs
@@ -1,8 +1,8 @@
use crate::FnCtxt;
use rustc_data_structures::{
- fx::{FxHashMap, FxHashSet},
graph::WithSuccessors,
graph::{iterate::DepthFirstSearch, vec_graph::VecGraph},
+ unord::{UnordBag, UnordMap, UnordSet},
};
use rustc_middle::ty::{self, Ty};
@@ -83,7 +83,7 @@ impl<'tcx> FnCtxt<'_, 'tcx> {
fn fallback_if_possible(
&self,
ty: Ty<'tcx>,
- diverging_fallback: &FxHashMap<Ty<'tcx>, Ty<'tcx>>,
+ diverging_fallback: &UnordMap<Ty<'tcx>, Ty<'tcx>>,
) {
// Careful: we do NOT shallow-resolve `ty`. We know that `ty`
// is an unsolved variable, and we determine its fallback
@@ -193,7 +193,7 @@ impl<'tcx> FnCtxt<'_, 'tcx> {
fn calculate_diverging_fallback(
&self,
unsolved_variables: &[Ty<'tcx>],
- ) -> FxHashMap<Ty<'tcx>, Ty<'tcx>> {
+ ) -> UnordMap<Ty<'tcx>, Ty<'tcx>> {
debug!("calculate_diverging_fallback({:?})", unsolved_variables);
// Construct a coercion graph where an edge `A -> B` indicates
@@ -210,10 +210,10 @@ impl<'tcx> FnCtxt<'_, 'tcx> {
//
// These variables are the ones that are targets for fallback to
// either `!` or `()`.
- let diverging_roots: FxHashSet<ty::TyVid> = self
+ let diverging_roots: UnordSet<ty::TyVid> = self
.diverging_type_vars
.borrow()
- .iter()
+ .items()
.map(|&ty| self.shallow_resolve(ty))
.filter_map(|ty| ty.ty_vid())
.map(|vid| self.root_var(vid))
@@ -284,8 +284,7 @@ impl<'tcx> FnCtxt<'_, 'tcx> {
// For each diverging variable, figure out whether it can
// reach a member of N. If so, it falls back to `()`. Else
// `!`.
- let mut diverging_fallback = FxHashMap::default();
- diverging_fallback.reserve(diverging_vids.len());
+ let mut diverging_fallback = UnordMap::with_capacity(diverging_vids.len());
for &diverging_vid in &diverging_vids {
let diverging_ty = Ty::new_var(self.tcx, diverging_vid);
let root_vid = self.root_var(diverging_vid);
@@ -293,14 +292,19 @@ impl<'tcx> FnCtxt<'_, 'tcx> {
.depth_first_search(root_vid)
.any(|n| roots_reachable_from_non_diverging.visited(n));
- let mut found_infer_var_info = ty::InferVarInfo { self_in_trait: false, output: false };
+ let infer_var_infos: UnordBag<_> = self
+ .inh
+ .infer_var_info
+ .borrow()
+ .items()
+ .filter(|&(vid, _)| self.infcx.root_var(*vid) == root_vid)
+ .map(|(_, info)| *info)
+ .collect();
- for (vid, info) in self.inh.infer_var_info.borrow().iter() {
- if self.infcx.root_var(*vid) == root_vid {
- found_infer_var_info.self_in_trait |= info.self_in_trait;
- found_infer_var_info.output |= info.output;
- }
- }
+ let found_infer_var_info = ty::InferVarInfo {
+ self_in_trait: infer_var_infos.items().any(|info| info.self_in_trait),
+ output: infer_var_infos.items().any(|info| info.output),
+ };
if found_infer_var_info.self_in_trait && found_infer_var_info.output {
// This case falls back to () to ensure that the code pattern in
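The last fallback.rs hunk above replaces a loop that mutated two booleans with a collect-then-`any` aggregation over the entries whose root variable matches (in rustc the collection is the internal `UnordBag`, whose API deliberately hides iteration order). A std-only sketch of the same aggregation shape, with plain tuples and a `Vec` standing in for the rustc types:

#[derive(Clone, Copy)]
struct InferVarInfo {
    self_in_trait: bool,
    output: bool,
}

fn aggregate(infos: &[(u32, InferVarInfo)], root_vid: u32) -> InferVarInfo {
    // Keep only the entries belonging to `root_vid`, then derive each flag.
    let matching: Vec<InferVarInfo> =
        infos.iter().filter(|&&(vid, _)| vid == root_vid).map(|&(_, info)| info).collect();
    InferVarInfo {
        self_in_trait: matching.iter().any(|info| info.self_in_trait),
        output: matching.iter().any(|info| info.output),
    }
}

fn main() {
    let infos = [
        (0, InferVarInfo { self_in_trait: true, output: false }),
        (1, InferVarInfo { self_in_trait: false, output: true }),
        (0, InferVarInfo { self_in_trait: false, output: true }),
    ];
    let agg = aggregate(&infos, 0);
    assert!(agg.self_in_trait && agg.output);
}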
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
index 9a80a9c93..28fe2e062 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
@@ -12,7 +12,7 @@ use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_hir::{ExprKind, GenericArg, Node, QPath};
use rustc_hir_analysis::astconv::generics::{
- check_generic_arg_count_for_call, create_substs_for_generic_args,
+ check_generic_arg_count_for_call, create_args_for_parent_generic_args,
};
use rustc_hir_analysis::astconv::{
AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch,
@@ -28,7 +28,7 @@ use rustc_middle::ty::visit::{TypeVisitable, TypeVisitableExt};
use rustc_middle::ty::{
self, AdtKind, CanonicalUserType, GenericParamDefKind, Ty, TyCtxt, UserType,
};
-use rustc_middle::ty::{GenericArgKind, SubstsRef, UserSelfTy, UserSubsts};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef, UserArgs, UserSelfTy};
use rustc_session::lint;
use rustc_span::def_id::LocalDefId;
use rustc_span::hygiene::DesugaringKind;
@@ -61,7 +61,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind);
- let msg = format!("unreachable {}", kind);
+ let msg = format!("unreachable {kind}");
self.tcx().struct_span_lint_hir(
lint::builtin::UNREACHABLE_CODE,
id,
@@ -85,16 +85,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// to get more type information.
// FIXME(-Ztrait-solver=next): A lot of the calls to this method should
// probably be `try_structurally_resolve_type` or `structurally_resolve_type` instead.
- pub(in super::super) fn resolve_vars_with_obligations(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
- self.resolve_vars_with_obligations_and_mutate_fulfillment(ty, |_| {})
- }
-
- #[instrument(skip(self, mutate_fulfillment_errors), level = "debug", ret)]
- pub(in super::super) fn resolve_vars_with_obligations_and_mutate_fulfillment(
- &self,
- mut ty: Ty<'tcx>,
- mutate_fulfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
- ) -> Ty<'tcx> {
+ #[instrument(skip(self), level = "debug", ret)]
+ pub(in super::super) fn resolve_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
// No Infer()? Nothing needs doing.
if !ty.has_non_region_infer() {
debug!("no inference var, nothing needs doing");
@@ -112,7 +104,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// possible. This can help substantially when there are
// indirect dependencies that don't seem worth tracking
// precisely.
- self.select_obligations_where_possible(mutate_fulfillment_errors);
+ self.select_obligations_where_possible(|_| {});
self.resolve_vars_if_possible(ty)
}
@@ -134,7 +126,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
pub fn tag(&self) -> String {
- format!("{:p}", self)
+ format!("{self:p}")
}
pub fn local_ty(&self, span: Span, nid: hir::HirId) -> Ty<'tcx> {
@@ -169,18 +161,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
#[instrument(level = "debug", skip(self))]
pub fn write_method_call(&self, hir_id: hir::HirId, method: MethodCallee<'tcx>) {
self.write_resolution(hir_id, Ok((DefKind::AssocFn, method.def_id)));
- self.write_substs(hir_id, method.substs);
+ self.write_args(hir_id, method.args);
}
- pub fn write_substs(&self, node_id: hir::HirId, substs: SubstsRef<'tcx>) {
- if !substs.is_empty() {
- debug!("write_substs({:?}, {:?}) in fcx {}", node_id, substs, self.tag());
+ pub fn write_args(&self, node_id: hir::HirId, args: GenericArgsRef<'tcx>) {
+ if !args.is_empty() {
+ debug!("write_args({:?}, {:?}) in fcx {}", node_id, args, self.tag());
- self.typeck_results.borrow_mut().node_substs_mut().insert(node_id, substs);
+ self.typeck_results.borrow_mut().node_args_mut().insert(node_id, args);
}
}
- /// Given the substs that we just converted from the HIR, try to
+ /// Given the args that we just converted from the HIR, try to
/// canonicalize them and store them as user-given substitutions
/// (i.e., substitutions that must be respected by the NLL check).
///
@@ -188,19 +180,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// occurred**, so that annotations like `Vec<_>` are preserved
/// properly.
#[instrument(skip(self), level = "debug")]
- pub fn write_user_type_annotation_from_substs(
+ pub fn write_user_type_annotation_from_args(
&self,
hir_id: hir::HirId,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
user_self_ty: Option<UserSelfTy<'tcx>>,
) {
debug!("fcx {}", self.tag());
- if Self::can_contain_user_lifetime_bounds((substs, user_self_ty)) {
+ if Self::can_contain_user_lifetime_bounds((args, user_self_ty)) {
let canonicalized = self.canonicalize_user_type_annotation(UserType::TypeOf(
def_id,
- UserSubsts { substs, user_self_ty },
+ UserArgs { args, user_self_ty },
));
debug!(?canonicalized);
self.write_user_type_annotation(hir_id, canonicalized);
@@ -221,7 +213,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.user_provided_types_mut()
.insert(hir_id, canonical_user_type_annotation);
} else {
- debug!("skipping identity substs");
+ debug!("skipping identity args");
}
}
@@ -306,12 +298,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
span: Span,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> ty::InstantiatedPredicates<'tcx> {
let bounds = self.tcx.predicates_of(def_id);
- let result = bounds.instantiate(self.tcx, substs);
+ let result = bounds.instantiate(self.tcx, args);
let result = self.normalize(span, result);
- debug!("instantiate_bounds(bounds={:?}, substs={:?}) = {:?}", bounds, substs, result);
+ debug!("instantiate_bounds(bounds={:?}, args={:?}) = {:?}", bounds, args, result);
result
}
@@ -397,11 +389,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty.normalized
}
- pub(super) fn user_substs_for_adt(ty: RawTy<'tcx>) -> UserSubsts<'tcx> {
+ pub(super) fn user_args_for_adt(ty: RawTy<'tcx>) -> UserArgs<'tcx> {
match (ty.raw.kind(), ty.normalized.kind()) {
- (ty::Adt(_, substs), _) => UserSubsts { substs, user_self_ty: None },
- (_, ty::Adt(adt, substs)) => UserSubsts {
- substs,
+ (ty::Adt(_, args), _) => UserArgs { args, user_self_ty: None },
+ (_, ty::Adt(adt, args)) => UserArgs {
+ args,
user_self_ty: Some(UserSelfTy { impl_def_id: adt.did(), self_ty: ty.raw }),
},
_ => bug!("non-adt type {:?}", ty),
@@ -489,9 +481,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
));
}
- /// Registers obligations that all `substs` are well-formed.
- pub fn add_wf_bounds(&self, substs: SubstsRef<'tcx>, expr: &hir::Expr<'_>) {
- for arg in substs.iter().filter(|arg| {
+ /// Registers obligations that all `args` are well-formed.
+ pub fn add_wf_bounds(&self, args: GenericArgsRef<'tcx>, expr: &hir::Expr<'_>) {
+ for arg in args.iter().filter(|arg| {
matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..))
}) {
self.register_wf_obligation(arg, expr.span, traits::WellFormed(None));
@@ -505,9 +497,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
span: Span,
field: &'tcx ty::FieldDef,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Ty<'tcx> {
- self.normalize(span, field.ty(self.tcx, substs))
+ self.normalize(span, field.ty(self.tcx, args))
}
pub(in super::super) fn resolve_rvalue_scopes(&self, def_id: DefId) {
@@ -554,11 +546,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
debug!(?expr_def_id);
// Create the `GeneratorWitness` type that we will unify with `interior`.
- let substs = ty::InternalSubsts::identity_for_item(
+ let args = ty::GenericArgs::identity_for_item(
self.tcx,
self.tcx.typeck_root_def_id(expr_def_id.to_def_id()),
);
- let witness = Ty::new_generator_witness_mir(self.tcx, expr_def_id.to_def_id(), substs);
+ let witness = Ty::new_generator_witness_mir(self.tcx, expr_def_id.to_def_id(), args);
// Unify `interior` with `witness` and collect all the resulting obligations.
let span = self.tcx.hir().body(body_id).value.span;
@@ -626,8 +618,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match *self_ty.kind() {
ty::Infer(ty::TyVar(found_vid)) => {
- // FIXME: consider using `sub_root_var` here so we
- // can see through subtyping.
let found_vid = self.root_var(found_vid);
debug!("self_type_matches_expected_vid - found_vid={:?}", found_vid);
expected_vid == found_vid
@@ -642,8 +632,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self_ty: ty::TyVid,
) -> impl DoubleEndedIterator<Item = traits::PredicateObligation<'tcx>> + Captures<'tcx> + 'b
{
- // FIXME: consider using `sub_root_var` here so we
- // can see through subtyping.
let ty_var_root = self.root_var(self_ty);
trace!("pending_obligations = {:#?}", self.fulfillment_cx.borrow().pending_obligations());
@@ -737,7 +725,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// tests/ui/impl-trait/hidden-type-is-opaque-2.rs for examples that hit this path.
if formal_ret.has_infer_types() {
for ty in ret_ty.walk() {
- if let ty::subst::GenericArgKind::Type(ty) = ty.unpack()
+ if let ty::GenericArgKind::Type(ty) = ty.unpack()
&& let ty::Alias(ty::Opaque, ty::AliasTy { def_id, .. }) = *ty.kind()
&& let Some(def_id) = def_id.as_local()
&& self.opaque_type_origin(def_id).is_some() {
@@ -784,8 +772,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else {
self.tcx.type_of(def_id)
};
- let substs = self.fresh_substs_for_item(span, def_id);
- let ty = item_ty.subst(self.tcx, substs);
+ let args = self.fresh_args_for_item(span, def_id);
+ let ty = item_ty.instantiate(self.tcx, args);
self.write_resolution(hir_id, Ok((def_kind, def_id)));
@@ -802,9 +790,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
_ => None,
};
if let Some(code) = code {
- self.add_required_obligations_with_code(span, def_id, substs, move |_, _| code.clone());
+ self.add_required_obligations_with_code(span, def_id, args, move |_, _| code.clone());
} else {
- self.add_required_obligations_for_hir(span, def_id, substs, hir_id);
+ self.add_required_obligations_for_hir(span, def_id, args, hir_id);
}
(Res::Def(def_kind, def_id), ty)
@@ -860,7 +848,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.resolve_fully_qualified_call(span, item_name, ty.normalized, qself.span, hir_id)
.and_then(|r| {
// lint bare trait if the method is found in the trait
- if span.edition().rust_2021() && let Some(mut diag) = self.tcx.sess.diagnostic().steal_diagnostic(qself.span, StashKey::TraitMissingMethod) {
+ if span.edition().at_least_rust_2021() && let Some(mut diag) = self.tcx.sess.diagnostic().steal_diagnostic(qself.span, StashKey::TraitMissingMethod) {
diag.emit();
}
Ok(r)
@@ -890,7 +878,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
// emit or cancel the diagnostic for bare traits
- if span.edition().rust_2021() && let Some(mut diag) = self.tcx.sess.diagnostic().steal_diagnostic(qself.span, StashKey::TraitMissingMethod) {
+ if span.edition().at_least_rust_2021() && let Some(mut diag) = self.tcx.sess.diagnostic().steal_diagnostic(qself.span, StashKey::TraitMissingMethod) {
if trait_missing_method {
// cancel the diag for bare traits when meeting `MyTrait::missing_method`
diag.cancel();
@@ -908,7 +896,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
error,
None,
Expectation::NoExpectation,
- trait_missing_method && span.edition().rust_2021(), // emits missing method for trait only after edition 2021
+ trait_missing_method && span.edition().at_least_rust_2021(), // emits missing method for trait only after edition 2021
) {
e.emit();
}
@@ -1069,7 +1057,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else if let ExprKind::MethodCall(..) = rcvr.kind {
err.span_note(
sp,
- modifies_rcvr_note.clone() + ", it is not meant to be used in method chains.",
+ modifies_rcvr_note + ", it is not meant to be used in method chains.",
);
} else {
err.span_note(sp, modifies_rcvr_note);
@@ -1205,8 +1193,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let has_self =
path_segs.last().is_some_and(|PathSeg(def_id, _)| tcx.generics_of(*def_id).has_self);
- let (res, self_ctor_substs) = if let Res::SelfCtor(impl_def_id) = res {
- let ty = self.handle_raw_ty(span, tcx.at(span).type_of(impl_def_id).subst_identity());
+ let (res, self_ctor_args) = if let Res::SelfCtor(impl_def_id) = res {
+ let ty =
+ self.handle_raw_ty(span, tcx.at(span).type_of(impl_def_id).instantiate_identity());
match ty.normalized.ty_adt_def() {
Some(adt_def) if adt_def.has_ctor() => {
let (ctor_kind, ctor_def_id) = adt_def.non_enum_variant().ctor.unwrap();
@@ -1217,9 +1206,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.emit_err(CtorIsPrivate { span, def: tcx.def_path_str(adt_def.did()) });
}
let new_res = Res::Def(DefKind::Ctor(CtorOf::Struct, ctor_kind), ctor_def_id);
- let user_substs = Self::user_substs_for_adt(ty);
- user_self_ty = user_substs.user_self_ty;
- (new_res, Some(user_substs.substs))
+ let user_args = Self::user_args_for_adt(ty);
+ user_self_ty = user_args.user_self_ty;
+ (new_res, Some(user_args.args))
}
_ => {
let mut err = tcx.sess.struct_span_err(
@@ -1324,7 +1313,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn inferred_kind(
&mut self,
- substs: Option<&[ty::GenericArg<'tcx>]>,
+ args: Option<&[ty::GenericArg<'tcx>]>,
param: &ty::GenericParamDef,
infer_args: bool,
) -> ty::GenericArg<'tcx> {
@@ -1338,7 +1327,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// If we have a default, then it doesn't matter that we're not
// inferring the type arguments: we provide the default where any
// is missing.
- tcx.type_of(param.def_id).subst(tcx, substs.unwrap()).into()
+ tcx.type_of(param.def_id).instantiate(tcx, args.unwrap()).into()
} else {
// If no type arguments were provided, we have to infer them.
// This case also occurs as a result of some malformed input, e.g.
@@ -1348,8 +1337,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
GenericParamDefKind::Const { has_default } => {
- if !infer_args && has_default {
- tcx.const_param_default(param.def_id).subst(tcx, substs.unwrap()).into()
+ if !infer_args
+ && has_default
+ && !tcx.has_attr(param.def_id, sym::rustc_host)
+ {
+ tcx.const_param_default(param.def_id)
+ .instantiate(tcx, args.unwrap())
+ .into()
} else {
self.fcx.var_for_def(self.span, param)
}
@@ -1358,8 +1352,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
- let substs_raw = self_ctor_substs.unwrap_or_else(|| {
- create_substs_for_generic_args(
+ let args_raw = self_ctor_args.unwrap_or_else(|| {
+ create_args_for_parent_generic_args(
tcx,
def_id,
&[],
@@ -1376,20 +1370,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
)
});
- // First, store the "user substs" for later.
- self.write_user_type_annotation_from_substs(hir_id, def_id, substs_raw, user_self_ty);
+ // First, store the "user args" for later.
+ self.write_user_type_annotation_from_args(hir_id, def_id, args_raw, user_self_ty);
// Normalize only after registering type annotations.
- let substs = self.normalize(span, substs_raw);
+ let args = self.normalize(span, args_raw);
- self.add_required_obligations_for_hir(span, def_id, &substs, hir_id);
+ self.add_required_obligations_for_hir(span, def_id, &args, hir_id);
// Substitute the values for the type parameters into the type of
// the referenced item.
let ty = tcx.type_of(def_id);
- assert!(!substs.has_escaping_bound_vars());
+ assert!(!args.has_escaping_bound_vars());
assert!(!ty.skip_binder().has_escaping_bound_vars());
- let ty_substituted = self.normalize(span, ty.subst(tcx, substs));
+ let ty_substituted = self.normalize(span, ty.instantiate(tcx, args));
if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
// In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
@@ -1397,7 +1391,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// type parameters, which we can infer by unifying the provided `Self`
// with the substituted impl type.
// This also occurs for an enum variant on a type alias.
- let impl_ty = self.normalize(span, tcx.type_of(impl_def_id).subst(tcx, substs));
+ let impl_ty = self.normalize(span, tcx.type_of(impl_def_id).instantiate(tcx, args));
let self_ty = self.normalize(span, self_ty);
match self.at(&self.misc(span), self.param_env).eq(
DefineOpaqueTypes::No,
@@ -1409,9 +1403,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.tcx.sess.delay_span_bug(
span,
format!(
- "instantiate_value_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
- self_ty,
- impl_ty,
+ "instantiate_value_path: (UFCS) {self_ty:?} was a subtype of {impl_ty:?} but now is not?",
),
);
}
@@ -1419,7 +1411,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
debug!("instantiate_value_path: type of {:?} is {:?}", hir_id, ty_substituted);
- self.write_substs(hir_id, substs);
+ self.write_args(hir_id, args);
(ty_substituted, res)
}
@@ -1429,10 +1421,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
span: Span,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
hir_id: hir::HirId,
) {
- self.add_required_obligations_with_code(span, def_id, substs, |idx, span| {
+ self.add_required_obligations_with_code(span, def_id, args, |idx, span| {
if span.is_dummy() {
ObligationCauseCode::ExprItemObligation(def_id, hir_id, idx)
} else {
@@ -1441,17 +1433,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
})
}
- #[instrument(level = "debug", skip(self, code, span, substs))]
+ #[instrument(level = "debug", skip(self, code, span, args))]
fn add_required_obligations_with_code(
&self,
span: Span,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
code: impl Fn(usize, Span) -> ObligationCauseCode<'tcx>,
) {
let param_env = self.param_env;
- let bounds = self.instantiate_bounds(span, def_id, &substs);
+ let bounds = self.instantiate_bounds(span, def_id, &args);
for obligation in traits::predicates_for_generics(
|idx, predicate_span| {
@@ -1460,10 +1452,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
param_env,
bounds,
) {
- // N.B. We are remapping all predicates to non-const since we don't know if we just
- // want them as function pointers or we are calling them from a const-context. The
- // actual checking will occur in `rustc_const_eval::transform::check_consts`.
- self.register_predicate(obligation.without_const(self.tcx));
+ self.register_predicate(obligation);
}
}
@@ -1476,7 +1465,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let ty = self.resolve_vars_with_obligations(ty);
if self.next_trait_solver()
- && let ty::Alias(ty::Projection, _) = ty.kind()
+ && let ty::Alias(ty::Projection | ty::Inherent | ty::Weak, _) = ty.kind()
{
match self
.at(&self.misc(sp), self.param_env)
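
// Illustrative toy model (invented types, not rustc's) of the rename threaded through the
// hunks above: `SubstsRef`/`UserSubsts`/`InternalSubsts` become `GenericArgsRef`/`UserArgs`/
// `GenericArgs`, and `EarlyBinder::subst`/`subst_identity` become
// `instantiate`/`instantiate_identity`. Only the shape of the API is mimicked here.
#[derive(Clone, Debug)]
enum Ty {
    Param(usize),  // stands in for `ty::Param`, indexed like a generic parameter
    Ref(Box<Ty>),  // stands in for `&T`
    Unit,
}

struct EarlyBinder(Ty);

impl EarlyBinder {
    // Formerly `subst`: plug concrete arguments in for the bound parameters.
    fn instantiate(&self, args: &[Ty]) -> Ty {
        fn go(ty: &Ty, args: &[Ty]) -> Ty {
            match ty {
                Ty::Param(i) => args[*i].clone(),
                Ty::Ref(inner) => Ty::Ref(Box::new(go(inner, args))),
                Ty::Unit => Ty::Unit,
            }
        }
        go(&self.0, args)
    }

    // Formerly `subst_identity`: keep each parameter as itself.
    fn instantiate_identity(&self) -> Ty {
        self.0.clone()
    }
}

fn main() {
    let binder = EarlyBinder(Ty::Ref(Box::new(Ty::Param(0)))); // a definition of type `&P0`
    println!("{:?}", binder.instantiate(&[Ty::Unit])); // Ref(Unit)
    println!("{:?}", binder.instantiate_identity());   // Ref(Param(0))
}
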
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
index ed9bb4945..c44d12e61 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
@@ -2,7 +2,7 @@ use crate::FnCtxt;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::def_id::DefId;
-use rustc_infer::traits::ObligationCauseCode;
+use rustc_infer::{infer::type_variable::TypeVariableOriginKind, traits::ObligationCauseCode};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
use rustc_span::{self, symbol::kw, Span};
use rustc_trait_selection::traits;
@@ -14,19 +14,32 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
error: &mut traits::FulfillmentError<'tcx>,
) -> bool {
- let (traits::ExprItemObligation(def_id, hir_id, idx) | traits::ExprBindingObligation(def_id, _, hir_id, idx))
- = *error.obligation.cause.code().peel_derives() else { return false; };
+ let (traits::ExprItemObligation(def_id, hir_id, idx)
+ | traits::ExprBindingObligation(def_id, _, hir_id, idx)) =
+ *error.obligation.cause.code().peel_derives()
+ else {
+ return false;
+ };
let hir = self.tcx.hir();
- let hir::Node::Expr(expr) = hir.get(hir_id) else { return false; };
+ let hir::Node::Expr(expr) = hir.get(hir_id) else {
+ return false;
+ };
- let Some(unsubstituted_pred) =
- self.tcx.predicates_of(def_id).instantiate_identity(self.tcx).predicates.into_iter().nth(idx)
- else { return false; };
+ let Some(unsubstituted_pred) = self
+ .tcx
+ .predicates_of(def_id)
+ .instantiate_identity(self.tcx)
+ .predicates
+ .into_iter()
+ .nth(idx)
+ else {
+ return false;
+ };
let generics = self.tcx.generics_of(def_id);
- let predicate_substs = match unsubstituted_pred.kind().skip_binder() {
- ty::ClauseKind::Trait(pred) => pred.trait_ref.substs.to_vec(),
- ty::ClauseKind::Projection(pred) => pred.projection_ty.substs.to_vec(),
+ let predicate_args = match unsubstituted_pred.kind().skip_binder() {
+ ty::ClauseKind::Trait(pred) => pred.trait_ref.args.to_vec(),
+ ty::ClauseKind::Projection(pred) => pred.projection_ty.args.to_vec(),
ty::ClauseKind::ConstArgHasType(arg, ty) => {
vec![ty.into(), arg.into()]
}
@@ -35,7 +48,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let find_param_matching = |matches: &dyn Fn(ty::ParamTerm) -> bool| {
- predicate_substs.iter().find_map(|arg| {
+ predicate_args.iter().find_map(|arg| {
arg.walk().find_map(|arg| {
if let ty::GenericArgKind::Type(ty) = arg.unpack()
&& let ty::Param(param_ty) = *ty.kind()
@@ -225,18 +238,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
param_to_point_at: ty::GenericArg<'tcx>,
segment: &hir::PathSegment<'tcx>,
) -> bool {
- let own_substs = self
+ let own_args = self
.tcx
.generics_of(def_id)
- .own_substs(ty::InternalSubsts::identity_for_item(self.tcx, def_id));
- let Some((index, _)) = own_substs
- .iter()
- .enumerate()
- .find(|(_, arg)| **arg == param_to_point_at) else { return false };
- let Some(arg) = segment
- .args()
- .args
- .get(index) else { return false; };
+ .own_args(ty::GenericArgs::identity_for_item(self.tcx, def_id));
+ let Some((index, _)) =
+ own_args.iter().enumerate().find(|(_, arg)| **arg == param_to_point_at)
+ else {
+ return false;
+ };
+ let Some(arg) = segment.args().args.get(index) else {
+ return false;
+ };
error.obligation.cause.span = arg
.span()
.find_ancestor_in_same_ctxt(error.obligation.cause.span)
@@ -254,11 +267,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
type BreakTy = ty::GenericArg<'tcx>;
fn visit_ty(&mut self, ty: Ty<'tcx>) -> std::ops::ControlFlow<Self::BreakTy> {
if let Some(origin) = self.0.type_var_origin(ty)
- && let rustc_infer::infer::type_variable::TypeVariableOriginKind::TypeParameterDefinition(_, def_id) =
- origin.kind
+ && let TypeVariableOriginKind::TypeParameterDefinition(_, def_id) = origin.kind
&& let generics = self.0.tcx.generics_of(self.1)
&& let Some(index) = generics.param_def_id_to_index(self.0.tcx, def_id)
- && let Some(subst) = ty::InternalSubsts::identity_for_item(self.0.tcx, self.1)
+ && let Some(subst) = ty::GenericArgs::identity_for_item(self.0.tcx, self.1)
.get(index as usize)
{
ControlFlow::Break(*subst)
@@ -298,13 +310,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
) -> Option<(&'tcx hir::Expr<'tcx>, Ty<'tcx>)> {
let def = self.tcx.adt_def(def_id);
- let identity_substs = ty::InternalSubsts::identity_for_item(self.tcx, def_id);
+ let identity_args = ty::GenericArgs::identity_for_item(self.tcx, def_id);
let fields_referencing_param: Vec<_> = def
.variant_with_id(variant_def_id)
.fields
.iter()
.filter(|field| {
- let field_ty = field.ty(self.tcx, identity_substs);
+ let field_ty = field.ty(self.tcx, identity_args);
find_param_in_ty(field_ty.into(), param_to_point_at)
})
.collect();
@@ -315,7 +327,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// same rules that check_expr_struct uses for macro hygiene.
if self.tcx.adjust_ident(expr_field.ident, variant_def_id) == field.ident(self.tcx)
{
- return Some((expr_field.expr, self.tcx.type_of(field.did).subst_identity()));
+ return Some((
+ expr_field.expr,
+ self.tcx.type_of(field.did).instantiate_identity(),
+ ));
}
}
}
@@ -342,7 +357,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
receiver: Option<&'tcx hir::Expr<'tcx>>,
args: &'tcx [hir::Expr<'tcx>],
) -> bool {
- let ty = self.tcx.type_of(def_id).subst_identity();
+ let ty = self.tcx.type_of(def_id).instantiate_identity();
if !ty.is_fn() {
return false;
}
@@ -484,7 +499,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty::TraitRef::new(
self.tcx,
obligation.impl_or_alias_def_id,
- ty::InternalSubsts::identity_for_item(self.tcx, obligation.impl_or_alias_def_id),
+ ty::GenericArgs::identity_for_item(self.tcx, obligation.impl_or_alias_def_id),
)
} else {
self.tcx
@@ -573,9 +588,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Find out which of `in_ty_elements` refer to `param`.
// FIXME: It may be better to take the first if there are multiple,
// just so that the error points to a smaller expression.
- let Some((drill_expr, drill_ty)) = is_iterator_singleton(expr_elements.iter().zip( in_ty_elements.iter()).filter(|(_expr_elem, in_ty_elem)| {
- find_param_in_ty((*in_ty_elem).into(), param)
- })) else {
+ let Some((drill_expr, drill_ty)) =
+ is_iterator_singleton(expr_elements.iter().zip(in_ty_elements.iter()).filter(
+ |(_expr_elem, in_ty_elem)| find_param_in_ty((*in_ty_elem).into(), param),
+ ))
+ else {
// The param is not mentioned, or it is mentioned in multiple indexes.
return Err(expr);
};
@@ -594,7 +611,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
{
// First, confirm that this struct is the same one as in the types, and if so,
// find the right variant.
- let Res::Def(expr_struct_def_kind, expr_struct_def_id) = self.typeck_results.borrow().qpath_res(expr_struct_path, expr.hir_id) else {
+ let Res::Def(expr_struct_def_kind, expr_struct_def_id) =
+ self.typeck_results.borrow().qpath_res(expr_struct_path, expr.hir_id)
+ else {
return Err(expr);
};
@@ -621,16 +640,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// We need to know which of the generic parameters mentions our target param.
// We expect that at least one of them does, since it is expected to be mentioned.
- let Some((drill_generic_index, generic_argument_type)) =
- is_iterator_singleton(
- in_ty_adt_generic_args.iter().enumerate().filter(
- |(_index, in_ty_generic)| {
- find_param_in_ty(*in_ty_generic, param)
- },
- ),
- ) else {
- return Err(expr);
- };
+ let Some((drill_generic_index, generic_argument_type)) = is_iterator_singleton(
+ in_ty_adt_generic_args
+ .iter()
+ .enumerate()
+ .filter(|(_index, in_ty_generic)| find_param_in_ty(*in_ty_generic, param)),
+ ) else {
+ return Err(expr);
+ };
let struct_generic_parameters: &ty::Generics = self.tcx.generics_of(in_ty_adt.did());
if drill_generic_index >= struct_generic_parameters.params.len() {
@@ -703,7 +720,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
// This is (possibly) a constructor call, like `Some(...)` or `MyStruct(a, b, c)`.
- let Res::Def(expr_struct_def_kind, expr_ctor_def_id) = self.typeck_results.borrow().qpath_res(expr_callee_path, expr_callee.hir_id) else {
+ let Res::Def(expr_struct_def_kind, expr_ctor_def_id) =
+ self.typeck_results.borrow().qpath_res(expr_callee_path, expr_callee.hir_id)
+ else {
return Err(expr);
};
@@ -744,16 +763,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// We need to know which of the generic parameters mentions our target param.
// We expect that at least one of them does, since it is expected to be mentioned.
- let Some((drill_generic_index, generic_argument_type)) =
- is_iterator_singleton(
- in_ty_adt_generic_args.iter().enumerate().filter(
- |(_index, in_ty_generic)| {
- find_param_in_ty(*in_ty_generic, param)
- },
- ),
- ) else {
- return Err(expr);
- };
+ let Some((drill_generic_index, generic_argument_type)) = is_iterator_singleton(
+ in_ty_adt_generic_args
+ .iter()
+ .enumerate()
+ .filter(|(_index, in_ty_generic)| find_param_in_ty(*in_ty_generic, param)),
+ ) else {
+ return Err(expr);
+ };
let struct_generic_parameters: &ty::Generics = self.tcx.generics_of(in_ty_adt.did());
if drill_generic_index >= struct_generic_parameters.params.len() {
@@ -794,7 +811,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.iter()
.map(|field| field.ty(self.tcx, *in_ty_adt_generic_args))
.enumerate()
- .filter(|(_index, field_type)| find_param_in_ty((*field_type).into(), param))
+ .filter(|(_index, field_type)| find_param_in_ty((*field_type).into(), param)),
) else {
return Err(expr);
};
@@ -846,7 +863,7 @@ fn find_param_in_ty<'tcx>(
// This logic may seem a bit strange, but typically when
// we have a projection type in a function signature, the
// argument that's being passed into that signature is
- // not actually constraining that projection's substs in
+ // not actually constraining that projection's args in
// a meaningful way. So we skip it, and see improvements
// in some UI tests.
walk.skip_current_subtree();
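
// Toy sketch (invented helper, not a rustc API) of the positional lookup done above with
// `generics_of(..).own_args(GenericArgs::identity_for_item(..))`: the identity arguments are
// just the parameters themselves, so the index of the parameter we want to blame is also the
// index of the explicit argument the user wrote in the path segment, if any.
fn explicit_arg_for_param<'a>(
    own_params: &[&str],          // e.g. ["T", "U"] for `fn foo<T, U>()`
    param_to_point_at: &str,      // the parameter the failed obligation mentions
    explicit_args: &'a [&'a str], // e.g. ["i32", "String"] for `foo::<i32, String>()`
) -> Option<&'a str> {
    let index = own_params.iter().position(|p| *p == param_to_point_at)?;
    explicit_args.get(index).copied()
}

fn main() {
    let blamed = explicit_arg_for_param(&["T", "U"], "U", &["i32", "String"]);
    assert_eq!(blamed, Some("String")); // point the error span at the `String` argument
}
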
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
index 41f5fafe7..4def78673 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -45,12 +45,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
debug!("FnCtxt::check_casts: {} deferred checks", deferred_cast_checks.len());
for cast in deferred_cast_checks.drain(..) {
- let prev_env = self.param_env;
- self.param_env = self.param_env.with_constness(cast.constness);
-
cast.check(self);
-
- self.param_env = prev_env;
}
*self.deferred_cast_checks.borrow_mut() = deferred_cast_checks;
@@ -93,7 +88,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expected: Expectation<'tcx>,
) -> Ty<'tcx> {
let has_error = match method {
- Ok(method) => method.substs.references_error() || method.sig.references_error(),
+ Ok(method) => method.args.references_error() || method.sig.references_error(),
Err(_) => true,
};
if has_error {
@@ -265,9 +260,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// fulfillment error to be more accurate.
let coerced_ty = self.resolve_vars_with_obligations(coerced_ty);
- let coerce_error = self
- .try_coerce(provided_arg, checked_ty, coerced_ty, AllowTwoPhase::Yes, None)
- .err();
+ let coerce_error =
+ self.coerce(provided_arg, checked_ty, coerced_ty, AllowTwoPhase::Yes, None).err();
if coerce_error.is_some() {
return Compatibility::Incompatible(coerce_error);
@@ -524,7 +518,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// suggestions and labels are (more) correct when an arg is a
// macro invocation.
let normalize_span = |span: Span| -> Span {
- let normalized_span = span.find_ancestor_inside(error_span).unwrap_or(span);
+ let normalized_span = span.find_ancestor_inside_same_ctxt(error_span).unwrap_or(span);
// Sometimes macros mess up the spans, so do not normalize the
// arg span to equal the error span, because that's less useful
// than pointing out the arg expr in the wrong context.
@@ -689,7 +683,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
err.span_label(
full_call_span,
- format!("arguments to this {} are incorrect", call_name),
+ format!("arguments to this {call_name} are incorrect"),
);
} else {
err = tcx.sess.struct_span_err_with_code(
@@ -753,11 +747,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
errors.retain(|error| {
- let Error::Invalid(
- provided_idx,
- expected_idx,
- Compatibility::Incompatible(Some(e)),
- ) = error else { return true };
+ let Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(Some(e))) =
+ error
+ else {
+ return true;
+ };
let (provided_ty, provided_span) = provided_arg_tys[*provided_idx];
let trace =
mk_trace(provided_span, formal_and_expected_inputs[*expected_idx], provided_ty);
@@ -796,10 +790,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
None,
None,
);
- err.span_label(
- full_call_span,
- format!("arguments to this {} are incorrect", call_name),
- );
+ err.span_label(full_call_span, format!("arguments to this {call_name} are incorrect"));
if let hir::ExprKind::MethodCall(_, rcvr, _, _) = call_expr.kind
&& provided_idx.as_usize() == expected_idx.as_usize()
@@ -874,7 +865,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if ty.is_unit() {
"()".to_string()
} else if ty.is_suggestable(tcx, false) {
- format!("/* {} */", ty)
+ format!("/* {ty} */")
} else if let Some(fn_def_id) = fn_def_id
&& self.tcx.def_kind(fn_def_id).is_fn_like()
&& let self_implicit =
@@ -931,14 +922,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let (provided_ty, provided_span) = provided_arg_tys[arg_idx];
let provided_ty_name = if !has_error_or_infer([provided_ty]) {
// FIXME: not suggestable, use something else
- format!(" of type `{}`", provided_ty)
+ format!(" of type `{provided_ty}`")
} else {
"".to_string()
};
- labels
- .push((provided_span, format!("unexpected argument{}", provided_ty_name)));
+ labels.push((provided_span, format!("unexpected argument{provided_ty_name}")));
let mut span = provided_span;
- if span.can_be_used_for_suggestions() {
+ if span.can_be_used_for_suggestions()
+ && error_span.can_be_used_for_suggestions()
+ {
if arg_idx.index() > 0
&& let Some((_, prev)) = provided_arg_tys
.get(ProvidedIdx::from_usize(arg_idx.index() - 1)
@@ -1009,11 +1001,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
args_span
};
let rendered = if !has_error_or_infer([input_ty]) {
- format!(" of type `{}`", input_ty)
+ format!(" of type `{input_ty}`")
} else {
"".to_string()
};
- labels.push((span, format!("an argument{} is missing", rendered)));
+ labels.push((span, format!("an argument{rendered} is missing")));
suggestion_text = match suggestion_text {
SuggestionText::None => SuggestionText::Provide(false),
SuggestionText::Provide(_) => SuggestionText::Provide(true),
@@ -1034,13 +1026,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let rendered =
if !has_error_or_infer([first_expected_ty, second_expected_ty]) {
format!(
- " of type `{}` and `{}`",
- first_expected_ty, second_expected_ty
+ " of type `{first_expected_ty}` and `{second_expected_ty}`"
)
} else {
"".to_string()
};
- labels.push((span, format!("two arguments{} are missing", rendered)));
+ labels.push((span, format!("two arguments{rendered} are missing")));
suggestion_text = match suggestion_text {
SuggestionText::None | SuggestionText::Provide(_) => {
SuggestionText::Provide(true)
@@ -1066,13 +1057,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
third_expected_ty,
]) {
format!(
- " of type `{}`, `{}`, and `{}`",
- first_expected_ty, second_expected_ty, third_expected_ty
+ " of type `{first_expected_ty}`, `{second_expected_ty}`, and `{third_expected_ty}`"
)
} else {
"".to_string()
};
- labels.push((span, format!("three arguments{} are missing", rendered)));
+ labels.push((span, format!("three arguments{rendered} are missing")));
suggestion_text = match suggestion_text {
SuggestionText::None | SuggestionText::Provide(_) => {
SuggestionText::Provide(true)
@@ -1113,25 +1103,25 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let (first_provided_ty, first_span) = provided_arg_tys[first_provided_idx];
let (_, first_expected_ty) = formal_and_expected_inputs[first_expected_idx];
let first_provided_ty_name = if !has_error_or_infer([first_provided_ty]) {
- format!(", found `{}`", first_provided_ty)
+ format!(", found `{first_provided_ty}`")
} else {
String::new()
};
labels.push((
first_span,
- format!("expected `{}`{}", first_expected_ty, first_provided_ty_name),
+ format!("expected `{first_expected_ty}`{first_provided_ty_name}"),
));
let (second_provided_ty, second_span) = provided_arg_tys[second_provided_idx];
let (_, second_expected_ty) = formal_and_expected_inputs[second_expected_idx];
let second_provided_ty_name = if !has_error_or_infer([second_provided_ty]) {
- format!(", found `{}`", second_provided_ty)
+ format!(", found `{second_provided_ty}`")
} else {
String::new()
};
labels.push((
second_span,
- format!("expected `{}`{}", second_expected_ty, second_provided_ty_name),
+ format!("expected `{second_expected_ty}`{second_provided_ty_name}"),
));
suggestion_text = match suggestion_text {
@@ -1144,13 +1134,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let (_, expected_ty) = formal_and_expected_inputs[dst_arg];
let (provided_ty, provided_span) = provided_arg_tys[dest_input];
let provided_ty_name = if !has_error_or_infer([provided_ty]) {
- format!(", found `{}`", provided_ty)
+ format!(", found `{provided_ty}`")
} else {
String::new()
};
labels.push((
provided_span,
- format!("expected `{}`{}", expected_ty, provided_ty_name),
+ format!("expected `{expected_ty}`{provided_ty_name}"),
));
}
@@ -1231,22 +1221,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
if let Some(suggestion_text) = suggestion_text {
let source_map = self.sess().source_map();
- let (mut suggestion, suggestion_span) =
- if let Some(call_span) = full_call_span.find_ancestor_inside(error_span) {
- ("(".to_string(), call_span.shrink_to_hi().to(error_span.shrink_to_hi()))
- } else {
- (
- format!(
- "{}(",
- source_map.span_to_snippet(full_call_span).unwrap_or_else(|_| {
- fn_def_id.map_or("".to_string(), |fn_def_id| {
- tcx.item_name(fn_def_id).to_string()
- })
+ let (mut suggestion, suggestion_span) = if let Some(call_span) =
+ full_call_span.find_ancestor_inside_same_ctxt(error_span)
+ {
+ ("(".to_string(), call_span.shrink_to_hi().to(error_span.shrink_to_hi()))
+ } else {
+ (
+ format!(
+ "{}(",
+ source_map.span_to_snippet(full_call_span).unwrap_or_else(|_| {
+ fn_def_id.map_or("".to_string(), |fn_def_id| {
+ tcx.item_name(fn_def_id).to_string()
})
- ),
- error_span,
- )
- };
+ })
+ ),
+ error_span,
+ )
+ };
let mut needs_comma = false;
for (expected_idx, provided_idx) in matched_inputs.iter_enumerated() {
if needs_comma {
@@ -1366,29 +1357,32 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
Res::Def(DefKind::Variant, _) => match ty.normalized.ty_adt_def() {
Some(adt) => {
- Some((adt.variant_of_res(def), adt.did(), Self::user_substs_for_adt(ty)))
+ Some((adt.variant_of_res(def), adt.did(), Self::user_args_for_adt(ty)))
}
_ => bug!("unexpected type: {:?}", ty.normalized),
},
- Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
+ Res::Def(
+ DefKind::Struct | DefKind::Union | DefKind::TyAlias { .. } | DefKind::AssocTy,
+ _,
+ )
| Res::SelfTyParam { .. }
| Res::SelfTyAlias { .. } => match ty.normalized.ty_adt_def() {
Some(adt) if !adt.is_enum() => {
- Some((adt.non_enum_variant(), adt.did(), Self::user_substs_for_adt(ty)))
+ Some((adt.non_enum_variant(), adt.did(), Self::user_args_for_adt(ty)))
}
_ => None,
},
_ => bug!("unexpected definition: {:?}", def),
};
- if let Some((variant, did, ty::UserSubsts { substs, user_self_ty })) = variant {
- debug!("check_struct_path: did={:?} substs={:?}", did, substs);
+ if let Some((variant, did, ty::UserArgs { args, user_self_ty })) = variant {
+ debug!("check_struct_path: did={:?} args={:?}", did, args);
// Register type annotation.
- self.write_user_type_annotation_from_substs(hir_id, did, substs, user_self_ty);
+ self.write_user_type_annotation_from_args(hir_id, did, args, user_self_ty);
// Check bounds on type arguments used in the path.
- self.add_required_obligations_for_hir(path_span, did, substs, hir_id);
+ self.add_required_obligations_for_hir(path_span, did, args, hir_id);
Ok((variant, ty.normalized))
} else {
@@ -1474,11 +1468,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
// Type check the pattern. Override if necessary to avoid knock-on errors.
- self.check_pat_top(&decl.pat, decl_ty, ty_span, origin_expr);
+ self.check_pat_top(&decl.pat, decl_ty, ty_span, origin_expr, Some(decl.origin));
let pat_ty = self.node_ty(decl.pat.hir_id);
self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, pat_ty);
- if let Some(blk) = decl.els {
+ if let Some(blk) = decl.origin.try_get_else() {
let previous_diverges = self.diverges.get();
let else_ty = self.check_block_with_expected(blk, NoExpectation);
let cause = self.cause(blk.span, ObligationCauseCode::LetElse);
@@ -1496,7 +1490,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.check_decl(local.into());
}
- pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>, is_last: bool) {
+ pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>) {
// Don't do all the complex logic below for `DeclItem`.
match stmt.kind {
hir::StmtKind::Item(..) => return,
@@ -1523,14 +1517,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
});
}
hir::StmtKind::Semi(ref expr) => {
- // All of this is equivalent to calling `check_expr`, but it is inlined out here
- // in order to capture the fact that this `match` is the last statement in its
- // function. This is done for better suggestions to remove the `;`.
- let expectation = match expr.kind {
- hir::ExprKind::Match(..) if is_last => IsLast(stmt.span),
- _ => NoExpectation,
- };
- self.check_expr_with_expectation(expr, expectation);
+ self.check_expr(expr);
}
}
@@ -1581,8 +1568,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let ctxt = BreakableCtxt { coerce: Some(coerce), may_break: false };
let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || {
- for (pos, s) in blk.stmts.iter().enumerate() {
- self.check_stmt(s, blk.stmts.len() - 1 == pos);
+ for s in blk.stmts {
+ self.check_stmt(s);
}
// check the tail expression **without** holding the
@@ -1595,7 +1582,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let coerce = ctxt.coerce.as_mut().unwrap();
if let Some((tail_expr, tail_expr_ty)) = tail_expr_ty {
let span = self.get_expr_coercion_span(tail_expr);
- let cause = self.cause(span, ObligationCauseCode::BlockTailExpression(blk.hir_id));
+ let cause = self.cause(
+ span,
+ ObligationCauseCode::BlockTailExpression(blk.hir_id, hir::MatchSource::Normal),
+ );
let ty_for_diagnostic = coerce.merged_ty();
// We use coerce_inner here because we want to augment the error
// suggesting to wrap the block in square brackets if it might've
@@ -1605,9 +1595,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&cause,
Some(tail_expr),
tail_expr_ty,
- Some(&mut |diag: &mut Diagnostic| {
+ |diag| {
self.suggest_block_to_brackets(diag, blk, tail_expr_ty, ty_for_diagnostic);
- }),
+ },
false,
);
} else {
@@ -1644,7 +1634,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
coerce.coerce_forced_unit(
self,
&self.misc(sp),
- &mut |err| {
+ |err| {
if let Some(expected_ty) = expected.only_has_type(self) {
if blk.stmts.is_empty() && blk.expr.is_none() {
self.suggest_boxing_when_appropriate(
@@ -1867,19 +1857,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if self.adjust_fulfillment_error_for_expr_obligation(error)
|| before_span != error.obligation.cause.span
{
- // Store both the predicate and the predicate *without constness*
- // since sometimes we instantiate and check both of these in a
- // method call, for example.
remap_cause.insert((
before_span,
error.obligation.predicate,
error.obligation.cause.clone(),
));
- remap_cause.insert((
- before_span,
- error.obligation.predicate.without_const(self.tcx),
- error.obligation.cause.clone(),
- ));
} else {
// If it failed to be adjusted once around, it may be adjusted
// via the "remap cause" mapping the second time...
@@ -2031,7 +2013,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else {
("closure", self.tcx.def_span(def_id))
};
- err.span_note(span, format!("{} defined here", kind));
+ err.span_note(span, format!("{kind} defined here"));
} else {
err.span_note(
self.tcx.def_span(def_id),
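
// Runnable, user-level illustration of the labels assembled above (hypothetical function
// names; the diagnostic wording comes from the format strings in this file): swapping two
// arguments yields one "expected `…`, found `…`" label per mismatched argument underneath
// the main "arguments to this function are incorrect" error, typically with a suggestion
// to swap them back.
fn describe(count: usize, name: String) {
    println!("{count} x {name}");
}

fn main() {
    describe(3, String::from("widget")); // correct order compiles fine
    // Swapping the arguments:
    //     describe(String::from("widget"), 3);
    // is rejected with labels roughly like
    //     expected `usize`, found `String`
    //     expected `String`, found `{integer}`
    // on the two argument expressions.
}
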
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
index 20b34df99..6a82b0021 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
@@ -288,21 +288,23 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
poly_trait_ref,
);
- let item_substs = self.astconv().create_substs_for_associated_item(
+ let item_args = self.astconv().create_args_for_associated_item(
span,
item_def_id,
item_segment,
- trait_ref.substs,
+ trait_ref.args,
);
- Ty::new_projection(self.tcx(), item_def_id, item_substs)
+ Ty::new_projection(self.tcx(), item_def_id, item_args)
}
fn probe_adt(&self, span: Span, ty: Ty<'tcx>) -> Option<ty::AdtDef<'tcx>> {
match ty.kind() {
ty::Adt(adt_def, _) => Some(*adt_def),
// FIXME(#104767): Should we handle bound regions here?
- ty::Alias(ty::Projection | ty::Inherent, _) if !ty.has_escaping_bound_vars() => {
+ ty::Alias(ty::Projection | ty::Inherent | ty::Weak, _)
+ if !ty.has_escaping_bound_vars() =>
+ {
self.normalize(span, ty).ty_adt_def()
}
_ => None,
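
// Surface-level example (plain user code, nothing rustc-internal) of why `probe_adt` now
// also normalizes `ty::Weak` aliases: with the lazy type aliases feature, a plain `type`
// alias is its own alias type and must be normalized before the ADT behind it becomes
// visible, e.g. when checking a struct literal written through the alias.
struct Point {
    x: i32,
    y: i32,
}

type P = Point; // an ordinary alias today; a "weak" alias under lazy type aliases

fn main() {
    // Checking this literal needs the `Point` ADT, which is only reachable once the
    // alias `P` has been normalized away.
    let p = P { x: 1, y: 2 };
    println!("{} {}", p.x, p.y);
}
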
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
index 79a7c0161..d2a53ee8b 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
@@ -1,8 +1,6 @@
use super::FnCtxt;
-use crate::errors::{
- AddReturnTypeSuggestion, ExpectedReturnTypeLabel, SuggestBoxing, SuggestConvertViaMethod,
-};
+use crate::errors;
use crate::fluent_generated as fluent;
use crate::method::probe::{IsSuggestion, Mode, ProbeScope};
use rustc_ast::util::parser::{ExprPrecedence, PREC_POSTFIX};
@@ -97,8 +95,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
found: Ty<'tcx>,
can_satisfy: impl FnOnce(Ty<'tcx>) -> bool,
) -> bool {
- let Some((def_id_or_name, output, inputs)) = self.extract_callable_info(found)
- else { return false; };
+ let Some((def_id_or_name, output, inputs)) = self.extract_callable_info(found) else {
+ return false;
+ };
if can_satisfy(output) {
let (sugg_call, mut applicability) = match inputs.len() {
0 => ("".to_string(), Applicability::MachineApplicable),
@@ -180,10 +179,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
rhs_ty: Ty<'tcx>,
can_satisfy: impl FnOnce(Ty<'tcx>, Ty<'tcx>) -> bool,
) -> bool {
- let Some((_, lhs_output_ty, lhs_inputs)) = self.extract_callable_info(lhs_ty)
- else { return false; };
- let Some((_, rhs_output_ty, rhs_inputs)) = self.extract_callable_info(rhs_ty)
- else { return false; };
+ let Some((_, lhs_output_ty, lhs_inputs)) = self.extract_callable_info(lhs_ty) else {
+ return false;
+ };
+ let Some((_, rhs_output_ty, rhs_inputs)) = self.extract_callable_info(rhs_ty) else {
+ return false;
+ };
if can_satisfy(lhs_output_ty, rhs_output_ty) {
let mut sugg = vec![];
@@ -392,9 +393,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
vec![(expr.span.shrink_to_hi(), format!(".{}()", conversion_method.name))]
};
let struct_pat_shorthand_field =
- self.maybe_get_struct_pattern_shorthand_field(expr);
+ self.tcx.hir().maybe_get_struct_pattern_shorthand_field(expr);
if let Some(name) = struct_pat_shorthand_field {
- sugg.insert(0, (expr.span.shrink_to_lo(), format!("{}: ", name)));
+ sugg.insert(0, (expr.span.shrink_to_lo(), format!("{name}: ")));
}
Some(sugg)
})
@@ -431,7 +432,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// FIXME: This could/should be extended to suggest `as_mut` and `as_deref_mut`,
// but those checks need to be a bit more delicate and the benefit is diminishing.
if self.can_eq(self.param_env, found_ty_inner, peeled) && error_tys_equate_as_ref {
- err.subdiagnostic(SuggestConvertViaMethod {
+ err.subdiagnostic(errors::SuggestConvertViaMethod {
span: expr.span.shrink_to_hi(),
sugg: ".as_ref()",
expected,
@@ -444,7 +445,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&& self.can_eq(self.param_env, deref_ty, peeled)
&& error_tys_equate_as_ref
{
- err.subdiagnostic(SuggestConvertViaMethod {
+ err.subdiagnostic(errors::SuggestConvertViaMethod {
span: expr.span.shrink_to_hi(),
sugg: ".as_deref()",
expected,
@@ -478,23 +479,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
found_ty: Ty<'tcx>,
expected_ty: Ty<'tcx>,
) -> Option<(Ty<'tcx>, Ty<'tcx>, Option<(Ty<'tcx>, Ty<'tcx>)>)> {
- let ty::Adt(found_adt, found_substs) = found_ty.peel_refs().kind() else {
+ let ty::Adt(found_adt, found_args) = found_ty.peel_refs().kind() else {
return None;
};
- let ty::Adt(expected_adt, expected_substs) = expected_ty.kind() else {
+ let ty::Adt(expected_adt, expected_args) = expected_ty.kind() else {
return None;
};
if self.tcx.is_diagnostic_item(sym::Option, found_adt.did())
&& self.tcx.is_diagnostic_item(sym::Option, expected_adt.did())
{
- Some((found_substs.type_at(0), expected_substs.type_at(0), None))
+ Some((found_args.type_at(0), expected_args.type_at(0), None))
} else if self.tcx.is_diagnostic_item(sym::Result, found_adt.did())
&& self.tcx.is_diagnostic_item(sym::Result, expected_adt.did())
{
Some((
- found_substs.type_at(0),
- expected_substs.type_at(0),
- Some((found_substs.type_at(1), expected_substs.type_at(1))),
+ found_args.type_at(0),
+ expected_args.type_at(0),
+ Some((found_args.type_at(1), expected_args.type_at(1))),
))
} else {
None
@@ -518,7 +519,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if self.can_coerce(Ty::new_box(self.tcx, found), expected) {
let suggest_boxing = match found.kind() {
ty::Tuple(tuple) if tuple.is_empty() => {
- SuggestBoxing::Unit { start: span.shrink_to_lo(), end: span }
+ errors::SuggestBoxing::Unit { start: span.shrink_to_lo(), end: span }
}
ty::Generator(def_id, ..)
if matches!(
@@ -526,9 +527,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Some(GeneratorKind::Async(AsyncGeneratorKind::Closure))
) =>
{
- SuggestBoxing::AsyncBody
+ errors::SuggestBoxing::AsyncBody
}
- _ => SuggestBoxing::Other { start: span.shrink_to_lo(), end: span.shrink_to_hi() },
+ _ => errors::SuggestBoxing::Other {
+ start: span.shrink_to_lo(),
+ end: span.shrink_to_hi(),
+ },
};
err.subdiagnostic(suggest_boxing);
@@ -555,7 +559,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.take(4)
.map(|(var_hir_id, upvar)| {
let var_name = self.tcx.hir().name(*var_hir_id).to_string();
- let msg = format!("`{}` captured here", var_name);
+ let msg = format!("`{var_name}` captured here");
(upvar.span, msg)
})
.collect::<Vec<_>>();
@@ -635,7 +639,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// is and we were expecting a Box, ergo Pin<Box<expected>>, we
// can suggest Box::pin.
let parent = self.tcx.hir().parent_id(expr.hir_id);
- let Some(Node::Expr(Expr { kind: ExprKind::Call(fn_name, _), .. })) = self.tcx.hir().find(parent) else {
+ let Some(Node::Expr(Expr { kind: ExprKind::Call(fn_name, _), .. })) =
+ self.tcx.hir().find(parent)
+ else {
return false;
};
match fn_name.kind {
@@ -751,23 +757,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match &fn_decl.output {
&hir::FnRetTy::DefaultReturn(span) if expected.is_unit() && !can_suggest => {
// `fn main()` must return `()`, do not suggest changing return type
- err.subdiagnostic(ExpectedReturnTypeLabel::Unit { span });
+ err.subdiagnostic(errors::ExpectedReturnTypeLabel::Unit { span });
return true;
}
&hir::FnRetTy::DefaultReturn(span) if expected.is_unit() => {
if let Some(found) = found.make_suggestable(self.tcx, false) {
- err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found: found.to_string() });
+ err.subdiagnostic(errors::AddReturnTypeSuggestion::Add { span, found: found.to_string() });
return true;
- } else if let ty::Closure(_, substs) = found.kind()
+ } else if let ty::Closure(_, args) = found.kind()
// FIXME(compiler-errors): Get better at printing binders...
- && let closure = substs.as_closure()
+ && let closure = args.as_closure()
&& closure.sig().is_suggestable(self.tcx, false)
{
- err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found: closure.print_as_impl_trait().to_string() });
+ err.subdiagnostic(errors::AddReturnTypeSuggestion::Add { span, found: closure.print_as_impl_trait().to_string() });
return true;
} else {
// FIXME: if `found` could be `impl Iterator` we should suggest that.
- err.subdiagnostic(AddReturnTypeSuggestion::MissingHere { span });
+ err.subdiagnostic(errors::AddReturnTypeSuggestion::MissingHere { span });
return true
}
}
@@ -789,10 +795,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
debug!(?found);
if found.is_suggestable(self.tcx, false) {
if term.span.is_empty() {
- err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found: found.to_string() });
+ err.subdiagnostic(errors::AddReturnTypeSuggestion::Add { span, found: found.to_string() });
return true;
} else {
- err.subdiagnostic(ExpectedReturnTypeLabel::Other { span, expected });
+ err.subdiagnostic(errors::ExpectedReturnTypeLabel::Other { span, expected });
}
}
}
@@ -808,7 +814,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let ty = self.normalize(span, ty);
let ty = self.tcx.erase_late_bound_regions(ty);
if self.can_coerce(expected, ty) {
- err.subdiagnostic(ExpectedReturnTypeLabel::Other { span, expected });
+ err.subdiagnostic(errors::ExpectedReturnTypeLabel::Other { span, expected });
self.try_suggest_return_impl_trait(err, expected, ty, fn_id);
return true;
}
@@ -850,12 +856,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let Some(hir::Node::Item(hir::Item {
kind:
hir::ItemKind::Fn(
- hir::FnSig { decl: hir::FnDecl { inputs: fn_parameters, output: fn_return, .. }, .. },
+ hir::FnSig {
+ decl: hir::FnDecl { inputs: fn_parameters, output: fn_return, .. },
+ ..
+ },
hir::Generics { params, predicates, .. },
_body_id,
),
..
- })) = fn_node else { return };
+ })) = fn_node
+ else {
+ return;
+ };
if params.get(expected_ty_as_param.index as usize).is_none() {
return;
@@ -920,7 +932,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.span_suggestion(
fn_return.span(),
"consider using an impl return type",
- format!("impl {}", all_bounds_str),
+ format!("impl {all_bounds_str}"),
Applicability::MaybeIncorrect,
);
}
@@ -938,7 +950,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if !expected.is_unit() {
return;
}
- let found = self.resolve_vars_with_obligations(found);
+ let found = self.resolve_vars_if_possible(found);
let in_loop = self.is_loop(id)
|| self
@@ -982,14 +994,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let ty = self.normalize(expr.span, ty);
if self.can_coerce(found, ty) {
- err.multipart_suggestion(
- "you might have meant to return this value",
- vec![
- (expr.span.shrink_to_lo(), "return ".to_string()),
- (expr.span.shrink_to_hi(), ";".to_string()),
- ],
- Applicability::MaybeIncorrect,
- );
+ if let Some(node) = self.tcx.hir().find(fn_id)
+ && let Some(owner_node) = node.as_owner()
+ && let Some(span) = expr.span.find_ancestor_inside(owner_node.span())
+ {
+ err.multipart_suggestion(
+ "you might have meant to return this value",
+ vec![
+ (span.shrink_to_lo(), "return ".to_string()),
+ (span.shrink_to_hi(), ";".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
}
}
}
@@ -1058,8 +1075,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
)
.must_apply_modulo_regions()
{
- let suggestion = match self.maybe_get_struct_pattern_shorthand_field(expr) {
- Some(ident) => format!(": {}.clone()", ident),
+ let suggestion = match self.tcx.hir().maybe_get_struct_pattern_shorthand_field(expr) {
+ Some(ident) => format!(": {ident}.clone()"),
None => ".clone()".to_string()
};
@@ -1074,68 +1091,55 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
false
}
- pub(crate) fn suggest_copied_or_cloned(
+ pub(crate) fn suggest_copied_cloned_or_as_ref(
&self,
diag: &mut Diagnostic,
expr: &hir::Expr<'_>,
expr_ty: Ty<'tcx>,
expected_ty: Ty<'tcx>,
) -> bool {
- let ty::Adt(adt_def, substs) = expr_ty.kind() else { return false; };
- let ty::Adt(expected_adt_def, expected_substs) = expected_ty.kind() else { return false; };
+ let ty::Adt(adt_def, args) = expr_ty.kind() else {
+ return false;
+ };
+ let ty::Adt(expected_adt_def, expected_args) = expected_ty.kind() else {
+ return false;
+ };
if adt_def != expected_adt_def {
return false;
}
- let mut suggest_copied_or_cloned = || {
- let expr_inner_ty = substs.type_at(0);
- let expected_inner_ty = expected_substs.type_at(0);
- if let &ty::Ref(_, ty, hir::Mutability::Not) = expr_inner_ty.kind()
- && self.can_eq(self.param_env, ty, expected_inner_ty)
- {
- let def_path = self.tcx.def_path_str(adt_def.did());
- if self.type_is_copy_modulo_regions(self.param_env, ty) {
- diag.span_suggestion_verbose(
- expr.span.shrink_to_hi(),
- format!(
- "use `{def_path}::copied` to copy the value inside the `{def_path}`"
- ),
- ".copied()",
- Applicability::MachineApplicable,
- );
- return true;
- } else if let Some(clone_did) = self.tcx.lang_items().clone_trait()
- && rustc_trait_selection::traits::type_known_to_meet_bound_modulo_regions(
- self,
- self.param_env,
- ty,
- clone_did,
- )
+ if Some(adt_def.did()) == self.tcx.get_diagnostic_item(sym::Result)
+ && self.can_eq(self.param_env, args.type_at(1), expected_args.type_at(1))
+ || Some(adt_def.did()) == self.tcx.get_diagnostic_item(sym::Option)
+ {
+ let expr_inner_ty = args.type_at(0);
+ let expected_inner_ty = expected_args.type_at(0);
+ if let &ty::Ref(_, ty, _mutability) = expr_inner_ty.kind()
+ && self.can_eq(self.param_env, ty, expected_inner_ty)
{
- diag.span_suggestion_verbose(
- expr.span.shrink_to_hi(),
- format!(
- "use `{def_path}::cloned` to clone the value inside the `{def_path}`"
- ),
- ".cloned()",
- Applicability::MachineApplicable,
- );
+ let def_path = self.tcx.def_path_str(adt_def.did());
+ let span = expr.span.shrink_to_hi();
+ let subdiag = if self.type_is_copy_modulo_regions(self.param_env, ty) {
+ errors::OptionResultRefMismatch::Copied {
+ span, def_path
+ }
+ } else if let Some(clone_did) = self.tcx.lang_items().clone_trait()
+ && rustc_trait_selection::traits::type_known_to_meet_bound_modulo_regions(
+ self,
+ self.param_env,
+ ty,
+ clone_did,
+ )
+ {
+ errors::OptionResultRefMismatch::Cloned {
+ span, def_path
+ }
+ } else {
+ return false;
+ };
+ diag.subdiagnostic(subdiag);
return true;
}
- }
- false
- };
-
- if let Some(result_did) = self.tcx.get_diagnostic_item(sym::Result)
- && adt_def.did() == result_did
- // Check that the error types are equal
- && self.can_eq(self.param_env, substs.type_at(1), expected_substs.type_at(1))
- {
- return suggest_copied_or_cloned();
- } else if let Some(option_did) = self.tcx.get_diagnostic_item(sym::Option)
- && adt_def.did() == option_did
- {
- return suggest_copied_or_cloned();
}
false
@@ -1177,10 +1181,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
),
))
{
+ let mut span = expr.span;
+ while expr.span.eq_ctxt(span) && let Some(parent_callsite) = span.parent_callsite()
+ {
+ span = parent_callsite;
+ }
+
let sugg = if expr.precedence().order() >= PREC_POSTFIX {
- vec![(expr.span.shrink_to_hi(), ".into()".to_owned())]
+ vec![(span.shrink_to_hi(), ".into()".to_owned())]
} else {
- vec![(expr.span.shrink_to_lo(), "(".to_owned()), (expr.span.shrink_to_hi(), ").into()".to_owned())]
+ vec![(span.shrink_to_lo(), "(".to_owned()), (span.shrink_to_hi(), ").into()".to_owned())]
};
diag.multipart_suggestion(
format!("call `Into::into` on this expression to convert `{expr_ty}` into `{expected_ty}`"),
@@ -1205,7 +1215,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return false;
}
- let ty::Adt(def, _) = expr_ty.peel_refs().kind() else { return false; };
+ let ty::Adt(def, _) = expr_ty.peel_refs().kind() else {
+ return false;
+ };
if !self.tcx.is_diagnostic_item(sym::Option, def.did()) {
return false;
}
@@ -1230,8 +1242,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return false;
}
- let suggestion = match self.maybe_get_struct_pattern_shorthand_field(expr) {
- Some(ident) => format!(": {}.is_some()", ident),
+ let suggestion = match self.tcx.hir().maybe_get_struct_pattern_shorthand_field(expr) {
+ Some(ident) => format!(": {ident}.is_some()"),
None => ".is_some()".to_string(),
};
@@ -1327,7 +1339,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
node: rustc_ast::LitKind::Int(lit, rustc_ast::LitIntType::Unsuffixed),
span,
}) => {
- let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(*span) else { return false; };
+ let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(*span) else {
+ return false;
+ };
if !(snippet.starts_with("0x") || snippet.starts_with("0X")) {
return false;
}
@@ -1367,10 +1381,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
// Provided expression needs to be a literal `0`.
- let ExprKind::Lit(Spanned {
- node: rustc_ast::LitKind::Int(0, _),
- span,
- }) = expr.kind else {
+ let ExprKind::Lit(Spanned { node: rustc_ast::LitKind::Int(0, _), span }) = expr.kind else {
return false;
};
@@ -1401,7 +1412,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expr: &hir::Expr<'_>,
expected_ty: Ty<'tcx>,
) -> bool {
- let Some((DefKind::AssocFn, old_def_id)) = self.typeck_results.borrow().type_dependent_def(expr.hir_id) else {
+ let Some((DefKind::AssocFn, old_def_id)) =
+ self.typeck_results.borrow().type_dependent_def(expr.hir_id)
+ else {
return false;
};
let old_item_name = self.tcx.item_name(old_def_id);
@@ -1457,7 +1470,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Same item
return false;
}
- let item_ty = self.tcx.type_of(item.def_id).subst_identity();
+ let item_ty = self.tcx.type_of(item.def_id).instantiate_identity();
// FIXME(compiler-errors): This check is *so* rudimentary
if item_ty.has_param() {
return false;
@@ -1494,8 +1507,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
found_ty: Ty<'tcx>,
expr: &hir::Expr<'_>,
) {
- let hir::ExprKind::MethodCall(segment, callee_expr, &[], _) = expr.kind else { return; };
- let Some(clone_trait_did) = self.tcx.lang_items().clone_trait() else { return; };
+ // When `expr` is `x` in something like `let x = foo.clone(); x`, we need to recurse up to
+ // get `foo` and `clone`.
+ let expr = self.note_type_is_not_clone_inner_expr(expr);
+
+ // If we've recursed to an `expr` of `foo.clone()`, get `foo` and `clone`.
+ let hir::ExprKind::MethodCall(segment, callee_expr, &[], _) = expr.kind else {
+ return;
+ };
+
+ let Some(clone_trait_did) = self.tcx.lang_items().clone_trait() else {
+ return;
+ };
let ty::Ref(_, pointee_ty, _) = found_ty.kind() else { return };
let results = self.typeck_results.borrow();
// First, look for a `Clone::clone` call
@@ -1545,6 +1568,83 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
+ /// Given a type mismatch error caused by `&T` being cloned instead of `T`, and
+ /// the `expr` as the source of this type mismatch, try to find the method call
+ /// as the source of this error and return that instead. Otherwise, return the
+ /// original expression.
+ fn note_type_is_not_clone_inner_expr<'b>(
+ &'b self,
+ expr: &'b hir::Expr<'b>,
+ ) -> &'b hir::Expr<'b> {
+ match expr.peel_blocks().kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path { segments: [_], res: crate::Res::Local(binding), .. },
+ )) => {
+ let Some(hir::Node::Pat(hir::Pat { hir_id, .. })) = self.tcx.hir().find(*binding)
+ else {
+ return expr;
+ };
+ let Some(parent) = self.tcx.hir().find(self.tcx.hir().parent_id(*hir_id)) else {
+ return expr;
+ };
+
+ match parent {
+ // foo.clone()
+ hir::Node::Local(hir::Local { init: Some(init), .. }) => {
+ self.note_type_is_not_clone_inner_expr(init)
+ }
+ // When `expr` is more complex like a tuple
+ hir::Node::Pat(hir::Pat {
+ hir_id: pat_hir_id,
+ kind: hir::PatKind::Tuple(pats, ..),
+ ..
+ }) => {
+ let Some(hir::Node::Local(hir::Local { init: Some(init), .. })) =
+ self.tcx.hir().find(self.tcx.hir().parent_id(*pat_hir_id)) else {
+ return expr;
+ };
+
+ match init.peel_blocks().kind {
+ ExprKind::Tup(init_tup) => {
+ if let Some(init) = pats
+ .iter()
+ .enumerate()
+ .filter(|x| x.1.hir_id == *hir_id)
+ .find_map(|(i, _)| init_tup.get(i))
+ {
+ self.note_type_is_not_clone_inner_expr(init)
+ } else {
+ expr
+ }
+ }
+ _ => expr,
+ }
+ }
+ _ => expr,
+ }
+ }
+ // If we're calling into a closure that may not be typed, recurse into that call. No need
+ // to worry if it's a call to a typed function or closure, as this would be handled
+ // previously.
+ hir::ExprKind::Call(Expr { kind: call_expr_kind, .. }, _) => {
+ if let hir::ExprKind::Path(hir::QPath::Resolved(None, call_expr_path)) = call_expr_kind
+ && let hir::Path { segments: [_], res: crate::Res::Local(binding), .. } = call_expr_path
+ && let Some(hir::Node::Pat(hir::Pat { hir_id, .. })) = self.tcx.hir().find(*binding)
+ && let Some(closure) = self.tcx.hir().find(self.tcx.hir().parent_id(*hir_id))
+ && let hir::Node::Local(hir::Local { init: Some(init), .. }) = closure
+ && let Expr { kind: hir::ExprKind::Closure(hir::Closure { body: body_id, .. }), ..} = init
+ {
+ let hir::Body { value: body_expr, .. } = self.tcx.hir().body(*body_id);
+ self.note_type_is_not_clone_inner_expr(body_expr)
+ } else {
+ expr
+ }
+ }
+ _ => expr,
+ }
+ }
+
/// A common error is to add an extra semicolon:
///
/// ```compile_fail,E0308
diff --git a/compiler/rustc_hir_typeck/src/gather_locals.rs b/compiler/rustc_hir_typeck/src/gather_locals.rs
index 4f45a24b2..ed4c63f17 100644
--- a/compiler/rustc_hir_typeck/src/gather_locals.rs
+++ b/compiler/rustc_hir_typeck/src/gather_locals.rs
@@ -9,6 +9,26 @@ use rustc_span::def_id::LocalDefId;
use rustc_span::Span;
use rustc_trait_selection::traits;
+/// Provides context for checking patterns in declarations. More specifically, this
+/// allows us to infer the type of an array from an irrefutable pattern, and in particular
+/// to infer the array's length. See issue #76342.
+#[derive(Debug, Copy, Clone)]
+pub(super) enum DeclOrigin<'a> {
+ // from an `if let` expression
+ LetExpr,
+ // from `let x = ..`
+ LocalDecl { els: Option<&'a hir::Block<'a>> },
+}
+
+impl<'a> DeclOrigin<'a> {
+ pub(super) fn try_get_else(&self) -> Option<&'a hir::Block<'a>> {
+ match self {
+ Self::LocalDecl { els } => *els,
+ Self::LetExpr => None,
+ }
+ }
+}
+
/// A declaration is an abstraction of [hir::Local] and [hir::Let].
///
/// It must have a hir_id, as this is how we connect gather_locals to the check functions.
@@ -18,20 +38,20 @@ pub(super) struct Declaration<'a> {
pub ty: Option<&'a hir::Ty<'a>>,
pub span: Span,
pub init: Option<&'a hir::Expr<'a>>,
- pub els: Option<&'a hir::Block<'a>>,
+ pub origin: DeclOrigin<'a>,
}
impl<'a> From<&'a hir::Local<'a>> for Declaration<'a> {
fn from(local: &'a hir::Local<'a>) -> Self {
let hir::Local { hir_id, pat, ty, span, init, els, source: _ } = *local;
- Declaration { hir_id, pat, ty, span, init, els }
+ Declaration { hir_id, pat, ty, span, init, origin: DeclOrigin::LocalDecl { els } }
}
}
impl<'a> From<&'a hir::Let<'a>> for Declaration<'a> {
fn from(let_expr: &'a hir::Let<'a>) -> Self {
let hir::Let { hir_id, pat, ty, span, init } = *let_expr;
- Declaration { hir_id, pat, ty, span, init: Some(init), els: None }
+ Declaration { hir_id, pat, ty, span, init: Some(init), origin: DeclOrigin::LetExpr }
}
}
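The `DeclOrigin` doc comment above distinguishes the two places a checked pattern can
come from. A minimal user-level sketch of the two syntactic forms it classifies
(illustration only, not part of this diff):

    fn main() {
        let opt: Option<i32> = Some(1);

        // `DeclOrigin::LocalDecl { els: Some(..) }`: a `let ... else` declaration
        // (with `els: None` it is a plain `let`).
        let Some(n) = opt else { return };

        // `DeclOrigin::LetExpr`: the `let` inside an `if let` expression.
        if let Some(m) = opt {
            println!("{n} {m}");
        }
    }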
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs
index b84c49186..cfedcee99 100644
--- a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs
@@ -6,7 +6,7 @@ use hir::{
intravisit::{self, Visitor},
Body, Expr, ExprKind, Guard, HirId, LoopIdError,
};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::unord::{UnordMap, UnordSet};
use rustc_hir as hir;
use rustc_index::IndexVec;
use rustc_infer::infer::InferCtxt;
@@ -28,7 +28,7 @@ pub(super) fn build_control_flow_graph<'tcx>(
consumed_borrowed_places: ConsumedAndBorrowedPlaces,
body: &'tcx Body<'tcx>,
num_exprs: usize,
-) -> (DropRangesBuilder, FxHashSet<HirId>) {
+) -> (DropRangesBuilder, UnordSet<HirId>) {
let mut drop_range_visitor = DropRangeVisitor::new(
infcx,
typeck_results,
@@ -443,9 +443,9 @@ impl<'a, 'tcx> Visitor<'tcx> for DropRangeVisitor<'a, 'tcx> {
// We add an edge to the hir_id of the expression/block we are breaking out of, and
// then in process_deferred_edges we will map this hir_id to its PostOrderId, which
// will refer to the end of the block due to the post order traversal.
- self.find_target_expression_from_destination(destination).map_or((), |target| {
+ if let Ok(target) = self.find_target_expression_from_destination(destination) {
self.drop_ranges.add_control_edge_hir_id(self.expr_index, target)
- });
+ }
if let Some(value) = value {
self.visit_expr(value);
@@ -528,7 +528,7 @@ impl DropRangesBuilder {
hir: Map<'_>,
num_exprs: usize,
) -> Self {
- let mut tracked_value_map = FxHashMap::<_, TrackedValueIndex>::default();
+ let mut tracked_value_map = UnordMap::<_, TrackedValueIndex>::default();
let mut next = <_>::from(0u32);
for value in tracked_values {
for_each_consumable(hir, value, |value| {
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs
index ecafbd668..e563bd40b 100644
--- a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs
@@ -17,7 +17,7 @@ use self::record_consumed_borrow::find_consumed_and_borrowed;
use crate::FnCtxt;
use hir::def_id::DefId;
use hir::{Body, HirId, HirIdMap, Node};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::unord::{UnordMap, UnordSet};
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
@@ -63,7 +63,7 @@ pub fn compute_drop_ranges<'a, 'tcx>(
// If drop range tracking is not enabled, skip all the analysis and produce an
// empty set of DropRanges.
DropRanges {
- tracked_value_map: FxHashMap::default(),
+ tracked_value_map: UnordMap::default(),
nodes: IndexVec::new(),
borrowed_temporaries: None,
}
@@ -125,8 +125,8 @@ impl Debug for TrackedValue {
write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
} else {
match self {
- Self::Variable(hir_id) => write!(f, "Variable({:?})", hir_id),
- Self::Temporary(hir_id) => write!(f, "Temporary({:?})", hir_id),
+ Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"),
+ Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"),
}
}
})
@@ -182,9 +182,9 @@ impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
}
pub struct DropRanges {
- tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+ tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
nodes: IndexVec<PostOrderId, NodeInfo>,
- borrowed_temporaries: Option<FxHashSet<HirId>>,
+ borrowed_temporaries: Option<UnordSet<HirId>>,
}
impl DropRanges {
@@ -227,7 +227,7 @@ struct DropRangesBuilder {
/// (see NodeInfo::drop_state). The hir_id_map field stores the mapping
/// from HirIds to the HirIdIndex that is used to represent that value in
/// bitvector.
- tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+ tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
/// When building the control flow graph, we don't always know the
/// post-order index of the target node at the point we encounter it.
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs
index 8ab0bd535..29413f080 100644
--- a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs
@@ -4,7 +4,7 @@ use crate::{
FnCtxt,
};
use hir::{def_id::DefId, Body, HirId, HirIdMap};
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::{fx::FxIndexSet, unord::UnordSet};
use rustc_hir as hir;
use rustc_middle::ty::{ParamEnv, TyCtxt};
use rustc_middle::{
@@ -30,13 +30,13 @@ pub(super) struct ConsumedAndBorrowedPlaces {
///
/// Note that this set excludes "partial drops" -- for example, a statement like `drop(x.y)` is
/// not considered a drop of `x`, although it would be a drop of `x.y`.
- pub(super) consumed: HirIdMap<FxHashSet<TrackedValue>>,
+ pub(super) consumed: HirIdMap<FxIndexSet<TrackedValue>>,
/// A set of hir-ids of values or variables that are borrowed at some point within the body.
- pub(super) borrowed: FxHashSet<TrackedValue>,
+ pub(super) borrowed: UnordSet<TrackedValue>,
/// A set of hir-ids of temporaries that are borrowed at some point within the body.
- pub(super) borrowed_temporaries: FxHashSet<HirId>,
+ pub(super) borrowed_temporaries: UnordSet<HirId>,
}
/// Works with ExprUseVisitor to find interesting values for the drop range analysis.
@@ -150,9 +150,10 @@ impl<'tcx> expr_use_visitor::Delegate<'tcx> for ExprUseDelegate<'tcx> {
hir.node_to_string(diag_expr_id),
hir.node_to_string(parent)
);
- place_with_id
- .try_into()
- .map_or((), |tracked_value| self.mark_consumed(parent, tracked_value));
+
+ if let Ok(tracked_value) = place_with_id.try_into() {
+ self.mark_consumed(parent, tracked_value)
+ }
}
fn borrow(
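The "partial drops" note above (a `drop(x.y)` counts as a drop of `x.y` but not of `x`)
corresponds to an ordinary partial move at the language level. A small stand-alone
illustration (not part of this diff):

    struct Pair {
        a: String,
        b: String,
    }

    fn main() {
        let x = Pair { a: "a".into(), b: "b".into() };
        // Moves (and immediately drops) only the field `x.a`.
        drop(x.a);
        // `x.b` is still usable; `x` as a whole is only partially moved.
        println!("{}", x.b);
    }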
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/mod.rs b/compiler/rustc_hir_typeck/src/generator_interior/mod.rs
index 86ea092bc..6a8171224 100644
--- a/compiler/rustc_hir_typeck/src/generator_interior/mod.rs
+++ b/compiler/rustc_hir_typeck/src/generator_interior/mod.rs
@@ -112,7 +112,7 @@ impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> {
self.fcx
.tcx
.sess
- .delay_span_bug(span, format!("Encountered var {:?}", unresolved_term));
+ .delay_span_bug(span, format!("Encountered var {unresolved_term:?}"));
} else {
let note = format!(
"the type is part of the {} because of this {}",
diff --git a/compiler/rustc_hir_typeck/src/inherited.rs b/compiler/rustc_hir_typeck/src/inherited.rs
index d5619af2a..7064484a4 100644
--- a/compiler/rustc_hir_typeck/src/inherited.rs
+++ b/compiler/rustc_hir_typeck/src/inherited.rs
@@ -1,6 +1,6 @@
use super::callee::DeferredCallResolution;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::unord::{UnordMap, UnordSet};
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::HirIdMap;
@@ -61,9 +61,9 @@ pub struct Inherited<'tcx> {
/// Whenever we introduce an adjustment from `!` into a type variable,
/// we record that type variable here. This is later used to inform
/// fallback. See the `fallback` module for details.
- pub(super) diverging_type_vars: RefCell<FxHashSet<Ty<'tcx>>>,
+ pub(super) diverging_type_vars: RefCell<UnordSet<Ty<'tcx>>>,
- pub(super) infer_var_info: RefCell<FxHashMap<ty::TyVid, ty::InferVarInfo>>,
+ pub(super) infer_var_info: RefCell<UnordMap<ty::TyVid, ty::InferVarInfo>>,
}
impl<'tcx> Deref for Inherited<'tcx> {
diff --git a/compiler/rustc_hir_typeck/src/intrinsicck.rs b/compiler/rustc_hir_typeck/src/intrinsicck.rs
index e58efc9d1..4e65182f1 100644
--- a/compiler/rustc_hir_typeck/src/intrinsicck.rs
+++ b/compiler/rustc_hir_typeck/src/intrinsicck.rs
@@ -11,7 +11,7 @@ use super::FnCtxt;
/// If the type is `Option<T>`, it will return `T`, otherwise
/// the type itself. Works on most `Option`-like types.
fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
- let ty::Adt(def, substs) = *ty.kind() else { return ty };
+ let ty::Adt(def, args) = *ty.kind() else { return ty };
if def.variants().len() == 2 && !def.repr().c() && def.repr().int.is_none() {
let data_idx;
@@ -28,7 +28,7 @@ fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
}
if def.variant(data_idx).fields.len() == 1 {
- return def.variant(data_idx).single_field().ty(tcx, substs);
+ return def.variant(data_idx).single_field().ty(tcx, args);
}
}
@@ -85,7 +85,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Ok(SizeSkeleton::Pointer { tail, .. }) => format!("pointer to `{tail}`"),
Ok(SizeSkeleton::Known(size)) => {
if let Some(v) = u128::from(size.bytes()).checked_mul(8) {
- format!("{} bits", v)
+ format!("{v} bits")
} else {
// `u128` should definitely be able to hold the size for any architecture;
// larger sizes should be reported as the error `are too big for the current architecture`
@@ -122,14 +122,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else {
err.note(format!("source type: `{}` ({})", from, skeleton_string(from, sk_from)))
.note(format!("target type: `{}` ({})", to, skeleton_string(to, sk_to)));
- let mut should_delay_as_bug = false;
- if let Err(LayoutError::Unknown(bad_from)) = sk_from && bad_from.references_error() {
- should_delay_as_bug = true;
- }
- if let Err(LayoutError::Unknown(bad_to)) = sk_to && bad_to.references_error() {
- should_delay_as_bug = true;
- }
- if should_delay_as_bug {
+ if let Err(LayoutError::ReferencesError(_)) = sk_from {
+ err.delay_as_bug();
+ } else if let Err(LayoutError::ReferencesError(_)) = sk_to {
err.delay_as_bug();
}
}
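The `unpack_option_like` helper above maps `Option<T>` (and similar single-payload
enums) to `T` for the transmute size check. At the layout level this lines up with the
niche guarantee that such wrappers add no size over their payload; a std-only
illustration (not part of this diff):

    use std::mem::size_of;

    fn main() {
        // The null-pointer niche makes `Option<&u8>` exactly the size of `&u8`.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
        // Likewise, `Option<NonZeroU32>` is exactly the size of `u32`.
        assert_eq!(size_of::<Option<std::num::NonZeroU32>>(), size_of::<u32>());
    }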
diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs
index 6f82ffcfe..c4d3cbc9f 100644
--- a/compiler/rustc_hir_typeck/src/lib.rs
+++ b/compiler/rustc_hir_typeck/src/lib.rs
@@ -6,7 +6,6 @@
#![feature(min_specialization)]
#![feature(control_flow_enum)]
#![feature(option_as_slice)]
-#![allow(rustc::potential_query_instability)]
#![recursion_limit = "256"]
#[macro_use]
@@ -72,7 +71,7 @@ use rustc_middle::traits;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_session::config;
use rustc_span::def_id::{DefId, LocalDefId};
-use rustc_span::{sym, Span};
+use rustc_span::Span;
fluent_messages! { "../messages.ftl" }
@@ -102,7 +101,7 @@ fn primary_body_of(
) -> Option<(hir::BodyId, Option<&hir::Ty<'_>>, Option<&hir::FnSig<'_>>)> {
match node {
Node::Item(item) => match item.kind {
- hir::ItemKind::Const(ty, body) | hir::ItemKind::Static(ty, _, body) => {
+ hir::ItemKind::Const(ty, _, body) | hir::ItemKind::Static(ty, _, body) => {
Some((body, Some(ty), None))
}
hir::ItemKind::Fn(ref sig, .., body) => Some((body, None, Some(sig))),
@@ -141,11 +140,11 @@ fn has_typeck_results(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
}
fn used_trait_imports(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &UnordSet<LocalDefId> {
- &*tcx.typeck(def_id).used_trait_imports
+ &tcx.typeck(def_id).used_trait_imports
}
fn typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> {
- let fallback = move || tcx.type_of(def_id.to_def_id()).subst_identity();
+ let fallback = move || tcx.type_of(def_id.to_def_id()).instantiate_identity();
typeck_with_fallback(tcx, def_id, fallback)
}
@@ -183,11 +182,7 @@ fn typeck_with_fallback<'tcx>(
let body = tcx.hir().body(body_id);
let param_env = tcx.param_env(def_id);
- let param_env = if tcx.has_attr(def_id, sym::rustc_do_not_const_check) {
- param_env.without_const()
- } else {
- param_env
- };
+
let inh = Inherited::new(tcx, def_id);
let mut fcx = FnCtxt::new(&inh, param_env, def_id);
@@ -195,7 +190,7 @@ fn typeck_with_fallback<'tcx>(
let fn_sig = if rustc_hir_analysis::collect::get_infer_ret_ty(&decl.output).is_some() {
fcx.astconv().ty_of_fn(id, header.unsafety, header.abi, decl, None, None)
} else {
- tcx.fn_sig(def_id).subst_identity()
+ tcx.fn_sig(def_id).instantiate_identity()
};
check_abi(tcx, id, span, fn_sig.abi());
@@ -264,11 +259,7 @@ fn typeck_with_fallback<'tcx>(
// Closure and generator analysis may run after fallback
// because they don't constrain other type variables.
- // Closure analysis only runs on closures. Therefore they only need to fulfill non-const predicates (as of now)
- let prev_constness = fcx.param_env.constness();
- fcx.param_env = fcx.param_env.without_const();
fcx.closure_analyze(body);
- fcx.param_env = fcx.param_env.with_constness(prev_constness);
assert!(fcx.deferred_call_resolutions.borrow().is_empty());
// Before the generator analysis, temporary scopes shall be marked to provide more
// precise information on types to be captured.
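Several hunks above replace `FxHashMap`/`FxHashSet` with `UnordMap`/`UnordSet`, and this
file drops its `rustc::potential_query_instability` allow. The hazard being avoided is
ordinary hash-map iteration order leaking into observable output; a std-only sketch
(the compiler's own `Unord*` types are not used here):

    use std::collections::{BTreeMap, HashMap};

    fn main() {
        let pairs = [("c", 3), ("a", 1), ("b", 2)];

        // HashMap iteration order is unspecified (and, with the default RandomState
        // hasher, can differ from run to run), so it must not be allowed to
        // influence diagnostics or other output.
        let unordered: HashMap<_, _> = pairs.iter().copied().collect();
        println!("{:?}", unordered.keys().collect::<Vec<_>>());

        // Sorting, or using an ordered map, gives a deterministic order.
        let ordered: BTreeMap<_, _> = pairs.iter().copied().collect();
        println!("{:?}", ordered.keys().collect::<Vec<_>>());
    }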
diff --git a/compiler/rustc_hir_typeck/src/mem_categorization.rs b/compiler/rustc_hir_typeck/src/mem_categorization.rs
index a1aa09084..9574da021 100644
--- a/compiler/rustc_hir_typeck/src/mem_categorization.rs
+++ b/compiler/rustc_hir_typeck/src/mem_categorization.rs
@@ -198,13 +198,14 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
}
/// Like `pat_ty`, but ignores implicit `&` patterns.
+ #[instrument(level = "debug", skip(self), ret)]
fn pat_ty_unadjusted(&self, pat: &hir::Pat<'_>) -> McResult<Ty<'tcx>> {
let base_ty = self.node_ty(pat.hir_id)?;
- debug!("pat_ty(pat={:?}) base_ty={:?}", pat, base_ty);
+ trace!(?base_ty);
// This code detects whether we are looking at a `ref x`,
// and if so, figures out what the type *being borrowed* is.
- let ret_ty = match pat.kind {
+ match pat.kind {
PatKind::Binding(..) => {
let bm = *self
.typeck_results
@@ -217,21 +218,18 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
// but what we want here is the type of the underlying value being borrowed.
// So peel off one-level, turning the &T into T.
match base_ty.builtin_deref(false) {
- Some(t) => t.ty,
+ Some(t) => Ok(t.ty),
None => {
- debug!("By-ref binding of non-derefable type {:?}", base_ty);
- return Err(());
+ debug!("By-ref binding of non-derefable type");
+ Err(())
}
}
} else {
- base_ty
+ Ok(base_ty)
}
}
- _ => base_ty,
- };
- debug!("pat_ty(pat={:?}) ret_ty={:?}", pat, ret_ty);
-
- Ok(ret_ty)
+ _ => Ok(base_ty),
+ }
}
pub(crate) fn cat_expr(&self, expr: &hir::Expr<'_>) -> McResult<PlaceWithHirId<'tcx>> {
@@ -299,13 +297,11 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
}
}
- #[instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self), ret)]
pub(crate) fn cat_expr_unadjusted(
&self,
expr: &hir::Expr<'_>,
) -> McResult<PlaceWithHirId<'tcx>> {
- debug!("cat_expr: id={} expr={:?}", expr.hir_id, expr);
-
let expr_ty = self.expr_ty(expr)?;
match expr.kind {
hir::ExprKind::Unary(hir::UnOp::Deref, ref e_base) => {
@@ -319,7 +315,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
hir::ExprKind::Field(ref base, _) => {
let base = self.cat_expr(base)?;
- debug!("cat_expr(cat_field): id={} expr={:?} base={:?}", expr.hir_id, expr, base);
+ debug!(?base);
let field_idx = self
.typeck_results
@@ -336,7 +332,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
))
}
- hir::ExprKind::Index(ref base, _) => {
+ hir::ExprKind::Index(ref base, _, _) => {
if self.typeck_results.is_method_call(expr) {
// If this is an index implemented by a method call, then it
// will include an implicit deref of the result.
@@ -389,7 +385,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
}
}
- #[instrument(level = "debug", skip(self, span))]
+ #[instrument(level = "debug", skip(self, span), ret)]
pub(crate) fn cat_res(
&self,
hir_id: hir::HirId,
@@ -430,6 +426,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
/// Note: the actual upvar access contains invisible derefs of closure
/// environment and upvar reference as appropriate. Only regionck cares
/// about these dereferences, so we let it compute them as needed.
+ #[instrument(level = "debug", skip(self), ret)]
fn cat_upvar(&self, hir_id: hir::HirId, var_id: hir::HirId) -> McResult<PlaceWithHirId<'tcx>> {
let closure_expr_def_id = self.body_owner;
@@ -439,24 +436,20 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
};
let var_ty = self.node_ty(var_id)?;
- let ret = PlaceWithHirId::new(hir_id, var_ty, PlaceBase::Upvar(upvar_id), Vec::new());
-
- debug!("cat_upvar ret={:?}", ret);
- Ok(ret)
+ Ok(PlaceWithHirId::new(hir_id, var_ty, PlaceBase::Upvar(upvar_id), Vec::new()))
}
+ #[instrument(level = "debug", skip(self), ret)]
pub(crate) fn cat_rvalue(
&self,
hir_id: hir::HirId,
span: Span,
expr_ty: Ty<'tcx>,
) -> PlaceWithHirId<'tcx> {
- debug!("cat_rvalue hir_id={:?}, expr_ty={:?}, span={:?}", hir_id, expr_ty, span);
- let ret = PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::Rvalue, Vec::new());
- debug!("cat_rvalue ret={:?}", ret);
- ret
+ PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::Rvalue, Vec::new())
}
+ #[instrument(level = "debug", skip(self, node), ret)]
pub(crate) fn cat_projection<N: HirNode>(
&self,
node: &N,
@@ -464,16 +457,23 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
ty: Ty<'tcx>,
kind: ProjectionKind,
) -> PlaceWithHirId<'tcx> {
+ let place_ty = base_place.place.ty();
let mut projections = base_place.place.projections;
+
+ let node_ty = self.typeck_results.node_type(node.hir_id());
+ // Opaque types can't have field projections, but we can instead convert
+ // the current place in-place (heh) to the hidden type, and then apply all
+ // follow up projections on that.
+ if node_ty != place_ty && matches!(place_ty.kind(), ty::Alias(ty::Opaque, ..)) {
+ projections.push(Projection { kind: ProjectionKind::OpaqueCast, ty: node_ty });
+ }
projections.push(Projection { kind, ty });
- let ret = PlaceWithHirId::new(
+ PlaceWithHirId::new(
node.hir_id(),
base_place.place.base_ty,
base_place.place.base,
projections,
- );
- debug!("cat_field ret {:?}", ret);
- ret
+ )
}
#[instrument(level = "debug", skip(self))]
@@ -497,7 +497,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
self.cat_deref(expr, base)
}
- #[instrument(level = "debug", skip(self, node))]
+ #[instrument(level = "debug", skip(self, node), ret)]
fn cat_deref(
&self,
node: &impl HirNode,
@@ -514,14 +514,12 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
let mut projections = base_place.place.projections;
projections.push(Projection { kind: ProjectionKind::Deref, ty: deref_ty });
- let ret = PlaceWithHirId::new(
+ Ok(PlaceWithHirId::new(
node.hir_id(),
base_place.place.base_ty,
base_place.place.base,
projections,
- );
- debug!("cat_deref ret {:?}", ret);
- Ok(ret)
+ ))
}
pub(crate) fn cat_pattern<F>(
@@ -559,7 +557,10 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
Ok(adt_def.variant_index_with_ctor_id(variant_ctor_id))
}
Res::Def(DefKind::Ctor(CtorOf::Struct, ..), _)
- | Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
+ | Res::Def(
+ DefKind::Struct | DefKind::Union | DefKind::TyAlias { .. } | DefKind::AssocTy,
+ _,
+ )
| Res::SelfCtor(..)
| Res::SelfTyParam { .. }
| Res::SelfTyAlias { .. } => {
@@ -595,7 +596,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
fn total_fields_in_tuple(&self, pat_hir_id: hir::HirId, span: Span) -> McResult<usize> {
let ty = self.typeck_results.node_type(pat_hir_id);
match ty.kind() {
- ty::Tuple(substs) => Ok(substs.len()),
+ ty::Tuple(args) => Ok(args.len()),
_ => {
self.tcx().sess.delay_span_bug(span, "tuple pattern not applied to a tuple");
Err(())
@@ -603,6 +604,13 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
}
}
+ /// Here, `place` is the `PlaceWithHirId` being matched and pat is the pattern it
+ /// is being matched against.
+ ///
+ /// In general, the way that this works is that we walk down the pattern,
+ /// constructing a `PlaceWithHirId` that represents the path that will be taken
+ /// to reach the value being matched.
+ #[instrument(skip(self, op), ret, level = "debug")]
fn cat_pattern_<F>(
&self,
mut place_with_id: PlaceWithHirId<'tcx>,
@@ -612,15 +620,6 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
where
F: FnMut(&PlaceWithHirId<'tcx>, &hir::Pat<'_>),
{
- // Here, `place` is the `PlaceWithHirId` being matched and pat is the pattern it
- // is being matched against.
- //
- // In general, the way that this works is that we walk down the pattern,
- // constructing a `PlaceWithHirId` that represents the path that will be taken
- // to reach the value being matched.
-
- debug!("cat_pattern(pat={:?}, place_with_id={:?})", pat, place_with_id);
-
// If (pattern) adjustments are active for this pattern, adjust the `PlaceWithHirId` correspondingly.
// `PlaceWithHirId`s are constructed differently from patterns. For example, in
//
@@ -654,11 +653,11 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
// `deref { deref { place_foo }}` instead of `place_foo` since the pattern is now `Some(x,)`
// and not `&&Some(x,)`, even though its assigned type is that of `&&Some(x,)`.
for _ in 0..self.typeck_results.pat_adjustments().get(pat.hir_id).map_or(0, |v| v.len()) {
- debug!("cat_pattern: applying adjustment to place_with_id={:?}", place_with_id);
+ debug!("applying adjustment to place_with_id={:?}", place_with_id);
place_with_id = self.cat_deref(pat, place_with_id)?;
}
let place_with_id = place_with_id; // lose mutability
- debug!("cat_pattern: applied adjustment derefs to get place_with_id={:?}", place_with_id);
+ debug!("applied adjustment derefs to get place_with_id={:?}", place_with_id);
// Invoke the callback, but only now, after the `place_with_id` has adjusted.
//
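The comment above on pattern adjustments describes match ergonomics: implicit derefs
are recorded so a pattern like `Some(x,)` can match a scrutinee sitting behind `&&`.
A user-level illustration of the adjustment being tracked (not part of this diff):

    fn main() {
        let place: &&Option<(i32,)> = &&Some((5,));

        // The pattern `Some((n,))` matches through two implicit dereferences;
        // the default binding mode becomes by-reference, so `n: &i32`.
        if let Some((n,)) = place {
            let n: &i32 = n;
            println!("{n}");
        }
    }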
diff --git a/compiler/rustc_hir_typeck/src/method/confirm.rs b/compiler/rustc_hir_typeck/src/method/confirm.rs
index 87edb8031..7c73f6a89 100644
--- a/compiler/rustc_hir_typeck/src/method/confirm.rs
+++ b/compiler/rustc_hir_typeck/src/method/confirm.rs
@@ -5,7 +5,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::GenericArg;
use rustc_hir_analysis::astconv::generics::{
- check_generic_arg_count_for_call, create_substs_for_generic_args,
+ check_generic_arg_count_for_call, create_args_for_parent_generic_args,
};
use rustc_hir_analysis::astconv::{AstConv, CreateSubstsForGenericArgsCtxt, IsMethodCall};
use rustc_infer::infer::{self, DefineOpaqueTypes, InferOk};
@@ -13,9 +13,9 @@ use rustc_middle::traits::{ObligationCauseCode, UnifyReceiverContext};
use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCoercion};
use rustc_middle::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::subst::{self, SubstsRef};
-use rustc_middle::ty::{self, GenericParamDefKind, Ty, TyCtxt};
-use rustc_middle::ty::{InternalSubsts, UserSubsts, UserType};
+use rustc_middle::ty::{
+ self, GenericArgs, GenericArgsRef, GenericParamDefKind, Ty, TyCtxt, UserArgs, UserType,
+};
use rustc_span::{Span, DUMMY_SP};
use rustc_trait_selection::traits;
@@ -96,13 +96,13 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick);
// Create substitutions for the method's type parameters.
- let rcvr_substs = self.fresh_receiver_substs(self_ty, &pick);
- let all_substs = self.instantiate_method_substs(&pick, segment, rcvr_substs);
+ let rcvr_args = self.fresh_receiver_args(self_ty, &pick);
+ let all_args = self.instantiate_method_args(&pick, segment, rcvr_args);
- debug!("rcvr_substs={rcvr_substs:?}, all_substs={all_substs:?}");
+ debug!("rcvr_args={rcvr_args:?}, all_args={all_args:?}");
// Create the final signature for the method, replacing late-bound regions.
- let (method_sig, method_predicates) = self.instantiate_method_sig(&pick, all_substs);
+ let (method_sig, method_predicates) = self.instantiate_method_sig(&pick, all_args);
// If there is a `Self: Sized` bound and `Self` is a trait object, it is possible that
// something which derefs to `Self` actually implements the trait and the caller
@@ -112,10 +112,10 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// In that case, we'll error anyway, but we'll also re-run the search with all traits
// in scope, and if we find another method which can be used, we'll output an
// appropriate hint suggesting to import the trait.
- let filler_substs = rcvr_substs
+ let filler_args = rcvr_args
.extend_to(self.tcx, pick.item.def_id, |def, _| self.tcx.mk_param_from_def(def));
let illegal_sized_bound = self.predicates_require_illegal_sized_bound(
- self.tcx.predicates_of(pick.item.def_id).instantiate(self.tcx, filler_substs),
+ self.tcx.predicates_of(pick.item.def_id).instantiate(self.tcx, filler_args),
);
// Unify the (adjusted) self type with what the method expects.
@@ -129,7 +129,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
"confirm: self_ty={:?} method_sig_rcvr={:?} method_sig={:?} method_predicates={:?}",
self_ty, method_sig_rcvr, method_sig, method_predicates
);
- self.unify_receivers(self_ty, method_sig_rcvr, &pick, all_substs);
+ self.unify_receivers(self_ty, method_sig_rcvr, &pick, all_args);
let (method_sig, method_predicates) =
self.normalize(self.span, (method_sig, method_predicates));
@@ -144,7 +144,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
if illegal_sized_bound.is_none() {
self.add_obligations(
Ty::new_fn_ptr(self.tcx, method_sig),
- all_substs,
+ all_args,
method_predicates,
pick.item.def_id,
);
@@ -153,7 +153,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// Create the final `MethodCallee`.
let callee = MethodCallee {
def_id: pick.item.def_id,
- substs: all_substs,
+ args: all_args,
sig: method_sig.skip_binder(),
};
ConfirmResult { callee, illegal_sized_bound }
@@ -171,7 +171,8 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// time writing the results into the various typeck results.
let mut autoderef = self.autoderef(self.call_expr.span, unadjusted_self_ty);
let Some((ty, n)) = autoderef.nth(pick.autoderefs) else {
- return Ty::new_error_with_message(self.tcx,
+ return Ty::new_error_with_message(
+ self.tcx,
rustc_span::DUMMY_SP,
format!("failed autoderef {}", pick.autoderefs),
);
@@ -224,7 +225,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
assert!(mutbl.is_mut());
Ty::new_ptr(self.tcx, ty::TypeAndMut { mutbl: hir::Mutability::Not, ty })
}
- other => panic!("Cannot adjust receiver type {:?} to const ptr", other),
+ other => panic!("Cannot adjust receiver type {other:?} to const ptr"),
};
adjustments.push(Adjustment {
@@ -251,20 +252,19 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
///
/// Note that this substitution may include late-bound regions from the impl level. If so,
/// these are instantiated later in the `instantiate_method_sig` routine.
- fn fresh_receiver_substs(
+ fn fresh_receiver_args(
&mut self,
self_ty: Ty<'tcx>,
pick: &probe::Pick<'tcx>,
- ) -> SubstsRef<'tcx> {
+ ) -> GenericArgsRef<'tcx> {
match pick.kind {
probe::InherentImplPick => {
let impl_def_id = pick.item.container_id(self.tcx);
assert!(
self.tcx.impl_trait_ref(impl_def_id).is_none(),
- "impl {:?} is not an inherent impl",
- impl_def_id
+ "impl {impl_def_id:?} is not an inherent impl"
);
- self.fresh_substs_for_item(self.span, impl_def_id)
+ self.fresh_args_for_item(self.span, impl_def_id)
}
probe::ObjectPick => {
@@ -288,7 +288,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
"original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}",
original_poly_trait_ref, upcast_trait_ref, trait_def_id
);
- upcast_trait_ref.substs
+ upcast_trait_ref.args
})
}
@@ -300,13 +300,13 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// the process we will unify the transformed-self-type
// of the method with the actual type in order to
// unify some of these variables.
- self.fresh_substs_for_item(self.span, trait_def_id)
+ self.fresh_args_for_item(self.span, trait_def_id)
}
probe::WhereClausePick(poly_trait_ref) => {
// Where clauses can have bound regions in them. We need to instantiate
// those to convert from a poly-trait-ref to a trait-ref.
- self.instantiate_binder_with_fresh_vars(poly_trait_ref).substs
+ self.instantiate_binder_with_fresh_vars(poly_trait_ref).args
}
}
}
@@ -343,12 +343,12 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
})
}
- fn instantiate_method_substs(
+ fn instantiate_method_args(
&mut self,
pick: &probe::Pick<'tcx>,
seg: &hir::PathSegment<'_>,
- parent_substs: SubstsRef<'tcx>,
- ) -> SubstsRef<'tcx> {
+ parent_args: GenericArgsRef<'tcx>,
+ ) -> GenericArgsRef<'tcx> {
// Determine the values for the generic parameters of the method.
// If they were not explicitly supplied, just construct fresh
// variables.
@@ -365,7 +365,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// Create subst for early-bound lifetime parameters, combining
// parameters from the type and those from the method.
- assert_eq!(generics.parent_count, parent_substs.len());
+ assert_eq!(generics.parent_count, parent_args.len());
struct MethodSubstsCtxt<'a, 'tcx> {
cfcx: &'a ConfirmContext<'a, 'tcx>,
@@ -389,7 +389,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
&mut self,
param: &ty::GenericParamDef,
arg: &GenericArg<'_>,
- ) -> subst::GenericArg<'tcx> {
+ ) -> ty::GenericArg<'tcx> {
match (&param.kind, arg) {
(GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
self.cfcx.fcx.astconv().ast_region_to_region(lt, Some(param)).into()
@@ -421,31 +421,31 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
fn inferred_kind(
&mut self,
- _substs: Option<&[subst::GenericArg<'tcx>]>,
+ _args: Option<&[ty::GenericArg<'tcx>]>,
param: &ty::GenericParamDef,
_infer_args: bool,
- ) -> subst::GenericArg<'tcx> {
+ ) -> ty::GenericArg<'tcx> {
self.cfcx.var_for_def(self.cfcx.span, param)
}
}
- let substs = create_substs_for_generic_args(
+ let args = create_args_for_parent_generic_args(
self.tcx,
pick.item.def_id,
- parent_substs,
+ parent_args,
false,
None,
&arg_count_correct,
&mut MethodSubstsCtxt { cfcx: self, pick, seg },
);
- // When the method is confirmed, the `substs` includes
+ // When the method is confirmed, the `args` includes
// parameters from not just the method, but also the impl of
// the method -- in particular, the `Self` type will be fully
// resolved. However, those are not something that the "user
// specified" -- i.e., those types come from the inferred type
// of the receiver, not something the user wrote. So when we
- // create the user-substs, we want to replace those earlier
+ // create the user-args, we want to replace those earlier
// types with just the types that the user actually wrote --
// that is, those that appear on the *method itself*.
//
@@ -453,15 +453,15 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// `foo.bar::<u32>(...)` -- the `Self` type here will be the
// type of `foo` (possibly adjusted), but we don't want to
// include that. We want just the `[_, u32]` part.
- if !substs.is_empty() && !generics.params.is_empty() {
+ if !args.is_empty() && !generics.params.is_empty() {
let user_type_annotation = self.probe(|_| {
- let user_substs = UserSubsts {
- substs: InternalSubsts::for_item(self.tcx, pick.item.def_id, |param, _| {
+ let user_args = UserArgs {
+ args: GenericArgs::for_item(self.tcx, pick.item.def_id, |param, _| {
let i = param.index as usize;
if i < generics.parent_count {
self.fcx.var_for_def(DUMMY_SP, param)
} else {
- substs[i]
+ args[i]
}
}),
user_self_ty: None, // not relevant here
@@ -469,18 +469,18 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
self.fcx.canonicalize_user_type_annotation(UserType::TypeOf(
pick.item.def_id,
- user_substs,
+ user_args,
))
});
- debug!("instantiate_method_substs: user_type_annotation={:?}", user_type_annotation);
+ debug!("instantiate_method_args: user_type_annotation={:?}", user_type_annotation);
if !self.skip_record_for_diagnostics {
self.fcx.write_user_type_annotation(self.call_expr.hir_id, user_type_annotation);
}
}
- self.normalize(self.span, substs)
+ self.normalize(self.span, args)
}
fn unify_receivers(
@@ -488,7 +488,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
self_ty: Ty<'tcx>,
method_self_ty: Ty<'tcx>,
pick: &probe::Pick<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) {
debug!(
"unify_receivers: self_ty={:?} method_self_ty={:?} span={:?} pick={:?}",
@@ -499,7 +499,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
ObligationCauseCode::UnifyReceiver(Box::new(UnifyReceiverContext {
assoc_item: pick.item,
param_env: self.param_env,
- substs,
+ args,
})),
);
match self.at(&cause, self.param_env).sup(DefineOpaqueTypes::No, method_self_ty, self_ty) {
@@ -509,7 +509,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
Err(terr) => {
// FIXME(arbitrary_self_types): We probably should limit the
// situations where this can occur by adding additional restrictions
- // to the feature, like the self type can't reference method substs.
+ // to the feature, like the self type can't reference method args.
if self.tcx.features().arbitrary_self_types {
self.err_ctxt()
.report_mismatched_types(&cause, method_self_ty, self_ty, terr)
@@ -532,19 +532,19 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
fn instantiate_method_sig(
&mut self,
pick: &probe::Pick<'tcx>,
- all_substs: SubstsRef<'tcx>,
+ all_args: GenericArgsRef<'tcx>,
) -> (ty::FnSig<'tcx>, ty::InstantiatedPredicates<'tcx>) {
- debug!("instantiate_method_sig(pick={:?}, all_substs={:?})", pick, all_substs);
+ debug!("instantiate_method_sig(pick={:?}, all_args={:?})", pick, all_args);
// Instantiate the bounds on the method with the
// type/early-bound-regions substitutions performed. There can
// be no late-bound regions appearing here.
let def_id = pick.item.def_id;
- let method_predicates = self.tcx.predicates_of(def_id).instantiate(self.tcx, all_substs);
+ let method_predicates = self.tcx.predicates_of(def_id).instantiate(self.tcx, all_args);
debug!("method_predicates after subst = {:?}", method_predicates);
- let sig = self.tcx.fn_sig(def_id).subst(self.tcx, all_substs);
+ let sig = self.tcx.fn_sig(def_id).instantiate(self.tcx, all_args);
debug!("type scheme substituted, sig={:?}", sig);
let sig = self.instantiate_binder_with_fresh_vars(sig);
@@ -556,18 +556,18 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
fn add_obligations(
&mut self,
fty: Ty<'tcx>,
- all_substs: SubstsRef<'tcx>,
+ all_args: GenericArgsRef<'tcx>,
method_predicates: ty::InstantiatedPredicates<'tcx>,
def_id: DefId,
) {
debug!(
- "add_obligations: fty={:?} all_substs={:?} method_predicates={:?} def_id={:?}",
- fty, all_substs, method_predicates, def_id
+ "add_obligations: fty={:?} all_args={:?} method_predicates={:?} def_id={:?}",
+ fty, all_args, method_predicates, def_id
);
// FIXME: could replace with the following, but we already calculated `method_predicates`,
// so we just call `predicates_for_generics` directly to avoid redoing work.
- // `self.add_required_obligations(self.span, def_id, &all_substs);`
+ // `self.add_required_obligations(self.span, def_id, &all_args);`
for obligation in traits::predicates_for_generics(
|idx, span| {
let code = if span.is_dummy() {
@@ -590,10 +590,10 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// this is a projection from a trait reference, so we have to
// make sure that the trait reference inputs are well-formed.
- self.add_wf_bounds(all_substs, self.call_expr);
+ self.add_wf_bounds(all_args, self.call_expr);
// the function type must also be well-formed (this is not
- // implied by the substs being well-formed because of inherent
+ // implied by the args being well-formed because of inherent
// impls and late-bound regions - see issue #28609).
self.register_wf_obligation(fty.into(), self.span, traits::WellFormed(None));
}
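The comment above notes that in `foo.bar::<u32>(...)` only `[_, u32]` counts as
user-written args, while `Self` comes from the receiver. A small stand-alone sketch of
that situation (illustration only, the names are made up):

    struct Foo;

    impl Foo {
        fn bar<T: Default>(&self) -> T {
            T::default()
        }
    }

    fn main() {
        let foo = Foo;
        // The turbofish supplies only the method's own parameter `T = u32`;
        // the `Self` type `Foo` is inferred from the receiver, not written by
        // the user.
        let x = foo.bar::<u32>();
        assert_eq!(x, 0u32);
    }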
diff --git a/compiler/rustc_hir_typeck/src/method/mod.rs b/compiler/rustc_hir_typeck/src/method/mod.rs
index e52cea188..6dd131aa2 100644
--- a/compiler/rustc_hir_typeck/src/method/mod.rs
+++ b/compiler/rustc_hir_typeck/src/method/mod.rs
@@ -7,12 +7,11 @@ mod prelude2021;
pub mod probe;
mod suggest;
-pub use self::suggest::SelfSource;
+pub use self::suggest::{MethodCallComponents, SelfSource};
pub use self::MethodError::*;
use crate::errors::OpMethodGenericParams;
use crate::FnCtxt;
-use rustc_data_structures::sync::Lrc;
use rustc_errors::{Applicability, Diagnostic, SubdiagnosticMessage};
use rustc_hir as hir;
use rustc_hir::def::{CtorOf, DefKind, Namespace};
@@ -20,8 +19,8 @@ use rustc_hir::def_id::DefId;
use rustc_infer::infer::{self, InferOk};
use rustc_middle::query::Providers;
use rustc_middle::traits::ObligationCause;
-use rustc_middle::ty::subst::{InternalSubsts, SubstsRef};
use rustc_middle::ty::{self, GenericParamDefKind, Ty, TypeVisitableExt};
+use rustc_middle::ty::{GenericArgs, GenericArgsRef};
use rustc_span::symbol::Ident;
use rustc_span::Span;
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
@@ -37,7 +36,7 @@ pub fn provide(providers: &mut Providers) {
pub struct MethodCallee<'tcx> {
/// Impl method ID, for inherent methods, or trait method ID, otherwise.
pub def_id: DefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
/// Instantiated method signature, i.e., it has been
/// substituted, normalized, and has had late-bound
@@ -190,11 +189,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.lint_dot_call_from_2018(self_ty, segment, span, call_expr, self_expr, &pick, args);
- for import_id in &pick.import_ids {
+ for &import_id in &pick.import_ids {
debug!("used_trait_import: {:?}", import_id);
- Lrc::get_mut(&mut self.typeck_results.borrow_mut().used_trait_imports)
- .unwrap()
- .insert(*import_id);
+ self.typeck_results.borrow_mut().used_trait_imports.insert(import_id);
}
self.tcx.check_stability(pick.item.def_id, Some(call_expr.hir_id), span, None);
@@ -324,9 +321,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
trait_def_id: DefId,
self_ty: Ty<'tcx>,
opt_input_types: Option<&[Ty<'tcx>]>,
- ) -> (traits::PredicateObligation<'tcx>, &'tcx ty::List<ty::subst::GenericArg<'tcx>>) {
+ ) -> (traits::PredicateObligation<'tcx>, &'tcx ty::List<ty::GenericArg<'tcx>>) {
// Construct a trait-reference `self_ty : Trait<input_tys>`
- let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
+ let args = GenericArgs::for_item(self.tcx, trait_def_id, |param, _| {
match param.kind {
GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {}
GenericParamDefKind::Type { .. } => {
@@ -340,19 +337,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.var_for_def(cause.span, param)
});
- let trait_ref = ty::TraitRef::new(self.tcx, trait_def_id, substs);
+ let trait_ref = ty::TraitRef::new(self.tcx, trait_def_id, args);
// Construct an obligation
let poly_trait_ref = ty::Binder::dummy(trait_ref);
- (
- traits::Obligation::new(
- self.tcx,
- cause,
- self.param_env,
- poly_trait_ref.without_const(),
- ),
- substs,
- )
+ (traits::Obligation::new(self.tcx, cause, self.param_env, poly_trait_ref), args)
}
/// `lookup_method_in_trait` is used for overloaded operators.
@@ -369,9 +358,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self_ty: Ty<'tcx>,
opt_input_types: Option<&[Ty<'tcx>]>,
) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
- let (obligation, substs) =
+ let (obligation, args) =
self.obligation_for_method(cause, trait_def_id, self_ty, opt_input_types);
- self.construct_obligation_for_trait(m_name, trait_def_id, obligation, substs)
+ self.construct_obligation_for_trait(m_name, trait_def_id, obligation, args)
}
// FIXME(#18741): it seems likely that we can consolidate some of this
@@ -382,7 +371,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
m_name: Ident,
trait_def_id: DefId,
obligation: traits::PredicateObligation<'tcx>,
- substs: &'tcx ty::List<ty::subst::GenericArg<'tcx>>,
+ args: &'tcx ty::List<ty::GenericArg<'tcx>>,
) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
debug!(?obligation);
@@ -428,7 +417,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// N.B., instantiate late-bound regions before normalizing the
// function signature so that normalization does not need to deal
// with bound regions.
- let fn_sig = tcx.fn_sig(def_id).subst(self.tcx, substs);
+ let fn_sig = tcx.fn_sig(def_id).instantiate(self.tcx, args);
let fn_sig =
self.instantiate_binder_with_fresh_vars(obligation.cause.span, infer::FnCall, fn_sig);
@@ -447,7 +436,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
//
// Note that as the method comes from a trait, it should not have
// any late-bound regions appearing in its bounds.
- let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, substs);
+ let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, args);
let InferOk { value, obligations: o } =
self.at(&obligation.cause, self.param_env).normalize(bounds);
@@ -480,7 +469,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
))),
));
- let callee = MethodCallee { def_id, substs, sig: fn_sig };
+ let callee = MethodCallee { def_id, args, sig: fn_sig };
debug!("callee = {:?}", callee);
@@ -567,10 +556,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
debug!(?pick);
{
let mut typeck_results = self.typeck_results.borrow_mut();
- let used_trait_imports = Lrc::get_mut(&mut typeck_results.used_trait_imports).unwrap();
for import_id in pick.import_ids {
debug!(used_trait_import=?import_id);
- used_trait_imports.insert(import_id);
+ typeck_results.used_trait_imports.insert(import_id);
}
}
diff --git a/compiler/rustc_hir_typeck/src/method/prelude2021.rs b/compiler/rustc_hir_typeck/src/method/prelude2021.rs
index ec4e7f7f8..3f1dca5b1 100644
--- a/compiler/rustc_hir_typeck/src/method/prelude2021.rs
+++ b/compiler/rustc_hir_typeck/src/method/prelude2021.rs
@@ -14,6 +14,7 @@ use rustc_span::symbol::kw::{Empty, Underscore};
use rustc_span::symbol::{sym, Ident};
use rustc_span::Span;
use rustc_trait_selection::infer::InferCtxtExt;
+use std::fmt::Write;
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pub(super) fn lint_dot_call_from_2018(
@@ -32,7 +33,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
// Rust 2021 and later is already using the new prelude
- if span.rust_2021() {
+ if span.at_least_rust_2021() {
return;
}
@@ -97,28 +98,28 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let self_adjusted = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) =
pick.autoref_or_ptr_adjustment
{
- format!("{}{} as *const _", derefs, self_expr)
+ format!("{derefs}{self_expr} as *const _")
} else {
- format!("{}{}{}", autoref, derefs, self_expr)
+ format!("{autoref}{derefs}{self_expr}")
};
lint.span_suggestion(
sp,
"disambiguate the method call",
- format!("({})", self_adjusted),
+ format!("({self_adjusted})"),
Applicability::MachineApplicable,
);
} else {
let self_adjusted = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) =
pick.autoref_or_ptr_adjustment
{
- format!("{}(...) as *const _", derefs)
+ format!("{derefs}(...) as *const _")
} else {
- format!("{}{}...", autoref, derefs)
+ format!("{autoref}{derefs}...")
};
lint.span_help(
sp,
- format!("disambiguate the method call with `({})`", self_adjusted,),
+ format!("disambiguate the method call with `({self_adjusted})`",),
);
}
@@ -143,16 +144,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let (self_adjusted, precise) = self.adjust_expr(pick, self_expr, sp);
if precise {
- let args = args
- .iter()
- .map(|arg| {
- let span = arg.span.find_ancestor_inside(sp).unwrap_or_default();
- format!(
- ", {}",
- self.sess().source_map().span_to_snippet(span).unwrap()
- )
- })
- .collect::<String>();
+ let args = args.iter().fold(String::new(), |mut string, arg| {
+ let span = arg.span.find_ancestor_inside(sp).unwrap_or_default();
+ write!(
+ string,
+ ", {}",
+ self.sess().source_map().span_to_snippet(span).unwrap()
+ )
+ .unwrap();
+ string
+ });
lint.span_suggestion(
sp,
@@ -168,7 +169,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.ok())
{
// Keep turbofish.
- format!("::{}", args)
+ format!("::{args}")
} else {
String::new()
},
@@ -203,7 +204,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pick: &Pick<'tcx>,
) {
// Rust 2021 and later is already using the new prelude
- if span.rust_2021() {
+ if span.at_least_rust_2021() {
return;
}
@@ -347,7 +348,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Glob import, so just use its name.
return None;
} else {
- return Some(format!("{}", any_id));
+ return Some(format!("{any_id}"));
}
}
@@ -396,9 +397,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let adjusted_text = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) =
pick.autoref_or_ptr_adjustment
{
- format!("{}{} as *const _", derefs, expr_text)
+ format!("{derefs}{expr_text} as *const _")
} else {
- format!("{}{}{}", autoref, derefs, expr_text)
+ format!("{autoref}{derefs}{expr_text}")
};
(adjusted_text, precise)
diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs
index 03a3eebbd..7164102a3 100644
--- a/compiler/rustc_hir_typeck/src/method/probe.rs
+++ b/compiler/rustc_hir_typeck/src/method/probe.rs
@@ -22,7 +22,7 @@ use rustc_middle::ty::AssocItem;
use rustc_middle::ty::GenericParamDefKind;
use rustc_middle::ty::ToPredicate;
use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt, TypeFoldable, TypeVisitableExt};
-use rustc_middle::ty::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::{GenericArgs, GenericArgsRef};
use rustc_session::lint;
use rustc_span::def_id::DefId;
use rustc_span::def_id::LocalDefId;
@@ -100,10 +100,10 @@ impl<'a, 'tcx> Deref for ProbeContext<'a, 'tcx> {
#[derive(Debug, Clone)]
pub(crate) struct Candidate<'tcx> {
// Candidates are (I'm not quite sure, but they are mostly) basically
- // some metadata on top of a `ty::AssocItem` (without substs).
+ // some metadata on top of a `ty::AssocItem` (without args).
//
// However, method probing wants to be able to evaluate the predicates
- // for a function with the substs applied - for example, if a function
+ // for a function with the args applied - for example, if a function
// has `where Self: Sized`, we don't want to consider it unless `Self`
// is actually `Sized`, and similarly, return-type suggestions want
// to consider the "actual" return type.
@@ -140,7 +140,7 @@ pub(crate) struct Candidate<'tcx> {
#[derive(Debug, Clone)]
pub(crate) enum CandidateKind<'tcx> {
InherentImplCandidate(
- SubstsRef<'tcx>,
+ GenericArgsRef<'tcx>,
// Normalize obligations
Vec<traits::PredicateObligation<'tcx>>,
),
@@ -437,7 +437,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// this case used to be allowed by the compiler,
// so we do a future-compat lint here for the 2015 edition
// (see https://github.com/rust-lang/rust/issues/46906)
- if self.tcx.sess.rust_2018() {
+ if self.tcx.sess.at_least_rust_2018() {
self.tcx.sess.emit_err(MethodCallOnUnknownRawPointee { span });
} else {
self.tcx.struct_span_lint_hir(
@@ -738,13 +738,13 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
continue;
}
- let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id);
- let impl_ty = impl_ty.subst(self.tcx, impl_substs);
+ let (impl_ty, impl_args) = self.impl_ty_and_args(impl_def_id);
+ let impl_ty = impl_ty.instantiate(self.tcx, impl_args);
debug!("impl_ty: {:?}", impl_ty);
// Determine the receiver type that the method itself expects.
- let (xform_self_ty, xform_ret_ty) = self.xform_self_ty(item, impl_ty, impl_substs);
+ let (xform_self_ty, xform_ret_ty) = self.xform_self_ty(item, impl_ty, impl_args);
debug!("xform_self_ty: {:?}, xform_ret_ty: {:?}", xform_self_ty, xform_ret_ty);
// We can't use normalize_associated_types_in as it will pollute the
@@ -770,7 +770,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
xform_self_ty,
xform_ret_ty,
item,
- kind: InherentImplCandidate(impl_substs, obligations),
+ kind: InherentImplCandidate(impl_args, obligations),
import_ids: smallvec![],
},
true,
@@ -813,7 +813,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
let new_trait_ref = this.erase_late_bound_regions(new_trait_ref);
let (xform_self_ty, xform_ret_ty) =
- this.xform_self_ty(item, new_trait_ref.self_ty(), new_trait_ref.substs);
+ this.xform_self_ty(item, new_trait_ref.self_ty(), new_trait_ref.args);
this.push_candidate(
Candidate {
xform_self_ty,
@@ -859,7 +859,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
);
let (xform_self_ty, xform_ret_ty) =
- this.xform_self_ty(item, trait_ref.self_ty(), trait_ref.substs);
+ this.xform_self_ty(item, trait_ref.self_ty(), trait_ref.args);
this.push_candidate(
Candidate {
@@ -929,8 +929,8 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
) -> bool {
match method.kind {
ty::AssocKind::Fn => self.probe(|_| {
- let substs = self.fresh_substs_for_item(self.span, method.def_id);
- let fty = self.tcx.fn_sig(method.def_id).subst(self.tcx, substs);
+ let args = self.fresh_args_for_item(self.span, method.def_id);
+ let fty = self.tcx.fn_sig(method.def_id).instantiate(self.tcx, args);
let fty = self.instantiate_binder_with_fresh_vars(self.span, infer::FnCall, fty);
if let Some(self_ty) = self_ty {
@@ -954,8 +954,8 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
trait_def_id: DefId,
) {
debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})", trait_def_id);
- let trait_substs = self.fresh_substs_for_item(self.span, trait_def_id);
- let trait_ref = ty::TraitRef::new(self.tcx, trait_def_id, trait_substs);
+ let trait_args = self.fresh_args_for_item(self.span, trait_def_id);
+ let trait_ref = ty::TraitRef::new(self.tcx, trait_def_id, trait_args);
if self.tcx.is_trait_alias(trait_def_id) {
// For trait aliases, recursively assume all explicitly named traits are relevant
@@ -977,7 +977,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
);
let (xform_self_ty, xform_ret_ty) =
- self.xform_self_ty(item, new_trait_ref.self_ty(), new_trait_ref.substs);
+ self.xform_self_ty(item, new_trait_ref.self_ty(), new_trait_ref.args);
self.push_candidate(
Candidate {
xform_self_ty,
@@ -1005,7 +1005,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
}
let (xform_self_ty, xform_ret_ty) =
- self.xform_self_ty(item, trait_ref.self_ty(), trait_substs);
+ self.xform_self_ty(item, trait_ref.self_ty(), trait_args);
self.push_candidate(
Candidate {
xform_self_ty,
@@ -1510,7 +1510,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
// match as well (or at least may match, sometimes we
// don't have enough information to fully evaluate).
match probe.kind {
- InherentImplCandidate(ref substs, ref ref_obligations) => {
+ InherentImplCandidate(ref args, ref ref_obligations) => {
// `xform_ret_ty` hasn't been normalized yet, only `xform_self_ty`,
// see the reasons mentioned in the comments in `assemble_inherent_impl_probe`
// for why this is necessary
@@ -1524,7 +1524,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
// Check whether the impl imposes obligations we have to worry about.
let impl_def_id = probe.item.container_id(self.tcx);
let impl_bounds = self.tcx.predicates_of(impl_def_id);
- let impl_bounds = impl_bounds.instantiate(self.tcx, substs);
+ let impl_bounds = impl_bounds.instantiate(self.tcx, args);
let InferOk { value: impl_bounds, obligations: norm_obligations } =
self.fcx.at(&cause, self.param_env).normalize(impl_bounds);
@@ -1592,15 +1592,14 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
if let Some(method_name) = self.method_name {
// Some trait methods are excluded for arrays before 2021.
// (`array.into_iter()` wants a slice iterator for compatibility.)
- if self_ty.is_array() && !method_name.span.rust_2021() {
+ if self_ty.is_array() && !method_name.span.at_least_rust_2021() {
let trait_def = self.tcx.trait_def(trait_ref.def_id);
if trait_def.skip_array_during_method_dispatch {
return ProbeResult::NoMatch;
}
}
}
- let predicate =
- ty::Binder::dummy(trait_ref).without_const().to_predicate(self.tcx);
+ let predicate = ty::Binder::dummy(trait_ref).to_predicate(self.tcx);
parent_pred = Some(predicate);
let obligation =
traits::Obligation::new(self.tcx, cause.clone(), self.param_env, predicate);
@@ -1843,10 +1842,10 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
&self,
item: ty::AssocItem,
impl_ty: Ty<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> (Ty<'tcx>, Option<Ty<'tcx>>) {
if item.kind == ty::AssocKind::Fn && self.mode == Mode::MethodCall {
- let sig = self.xform_method_sig(item.def_id, substs);
+ let sig = self.xform_method_sig(item.def_id, args);
(sig.inputs()[0], Some(sig.output()))
} else {
(impl_ty, None)
@@ -1854,11 +1853,11 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
}
#[instrument(level = "debug", skip(self))]
- fn xform_method_sig(&self, method: DefId, substs: SubstsRef<'tcx>) -> ty::FnSig<'tcx> {
+ fn xform_method_sig(&self, method: DefId, args: GenericArgsRef<'tcx>) -> ty::FnSig<'tcx> {
let fn_sig = self.tcx.fn_sig(method);
debug!(?fn_sig);
- assert!(!substs.has_escaping_bound_vars());
+ assert!(!args.has_escaping_bound_vars());
// It is possible for type parameters or early-bound lifetimes
// to appear in the signature of `self`. The substitutions we
@@ -1866,15 +1865,15 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
// method yet. So create fresh variables here for those too,
// if there are any.
let generics = self.tcx.generics_of(method);
- assert_eq!(substs.len(), generics.parent_count as usize);
+ assert_eq!(args.len(), generics.parent_count as usize);
let xform_fn_sig = if generics.params.is_empty() {
- fn_sig.subst(self.tcx, substs)
+ fn_sig.instantiate(self.tcx, args)
} else {
- let substs = InternalSubsts::for_item(self.tcx, method, |param, _| {
+ let args = GenericArgs::for_item(self.tcx, method, |param, _| {
let i = param.index as usize;
- if i < substs.len() {
- substs[i]
+ if i < args.len() {
+ args[i]
} else {
match param.kind {
GenericParamDefKind::Lifetime => {
@@ -1887,18 +1886,18 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
}
}
});
- fn_sig.subst(self.tcx, substs)
+ fn_sig.instantiate(self.tcx, args)
};
self.erase_late_bound_regions(xform_fn_sig)
}
/// Gets the type of an impl and generates substitutions with inference vars.
- fn impl_ty_and_substs(
+ fn impl_ty_and_args(
&self,
impl_def_id: DefId,
- ) -> (ty::EarlyBinder<Ty<'tcx>>, SubstsRef<'tcx>) {
- (self.tcx.type_of(impl_def_id), self.fresh_substs_for_item(self.span, impl_def_id))
+ ) -> (ty::EarlyBinder<Ty<'tcx>>, GenericArgsRef<'tcx>) {
+ (self.tcx.type_of(impl_def_id), self.fresh_args_for_item(self.span, impl_def_id))
}
/// Replaces late-bound-regions bound by `value` with `'static` using
@@ -1938,13 +1937,21 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
/// Determine if the associated item with the given DefId matches
/// the desired name via a doc alias.
fn matches_by_doc_alias(&self, def_id: DefId) -> bool {
- let Some(name) = self.method_name else { return false; };
- let Some(local_def_id) = def_id.as_local() else { return false; };
+ let Some(name) = self.method_name else {
+ return false;
+ };
+ let Some(local_def_id) = def_id.as_local() else {
+ return false;
+ };
let hir_id = self.fcx.tcx.hir().local_def_id_to_hir_id(local_def_id);
let attrs = self.fcx.tcx.hir().attrs(hir_id);
for attr in attrs {
- let sym::doc = attr.name_or_empty() else { continue; };
- let Some(values) = attr.meta_item_list() else { continue; };
+ let sym::doc = attr.name_or_empty() else {
+ continue;
+ };
+ let Some(values) = attr.meta_item_list() else {
+ continue;
+ };
for v in values {
if v.name_or_empty() != sym::alias {
continue;
@@ -2032,8 +2039,8 @@ impl<'tcx> Candidate<'tcx> {
// means they are safe to put into the
// `WhereClausePick`.
assert!(
- !trait_ref.skip_binder().substs.has_infer()
- && !trait_ref.skip_binder().substs.has_placeholders()
+ !trait_ref.skip_binder().args.has_infer()
+ && !trait_ref.skip_binder().args.has_placeholders()
);
WhereClausePick(*trait_ref)
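The probe hunk above keeps the pre-2021 carve-out where traits flagged with
`skip_array_during_method_dispatch` are skipped for array receivers, so that
`array.into_iter()` keeps resolving to the slice iterator on old editions. What that
looks like in user code (illustration only):

    fn main() {
        let arr = [1, 2, 3];

        // On editions before 2021 method dispatch skips `IntoIterator` for arrays,
        // so this autorefs and iterates over `&i32` (the slice iterator). On
        // edition 2021 it iterates over `i32` by value.
        for x in arr.into_iter() {
            let _ = x;
        }

        // Either way `arr` remains usable here: on old editions it was only
        // borrowed, and on 2021 `[i32; 3]` is `Copy`.
        println!("{}", arr.len());
    }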
diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs
index 5f924f309..72a04a02b 100644
--- a/compiler/rustc_hir_typeck/src/method/suggest.rs
+++ b/compiler/rustc_hir_typeck/src/method/suggest.rs
@@ -2,12 +2,13 @@
//! found or is otherwise invalid.
use crate::errors;
-use crate::errors::CandidateTraitNote;
-use crate::errors::NoAssociatedItem;
+use crate::errors::{CandidateTraitNote, NoAssociatedItem};
use crate::Expectation;
use crate::FnCtxt;
use rustc_ast::ast::Mutability;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_attr::parse_confusables;
+use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
+use rustc_data_structures::unord::UnordSet;
use rustc_errors::StashKey;
use rustc_errors::{
pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
@@ -31,6 +32,7 @@ use rustc_middle::ty::fast_reject::{simplify_type, TreatParams};
use rustc_middle::ty::print::{with_crate_prefix, with_forced_trimmed_paths};
use rustc_middle::ty::IsSuggestable;
use rustc_middle::ty::{self, GenericArgKind, Ty, TyCtxt, TypeVisitableExt};
+use rustc_span::def_id::DefIdSet;
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::Symbol;
use rustc_span::{edit_distance, source_map, ExpnKind, FileName, MacroKind, Span};
@@ -48,6 +50,15 @@ use rustc_hir::intravisit::Visitor;
use std::cmp::{self, Ordering};
use std::iter;
+/// After identifying that `full_expr` is a method call, we use this type to keep the expression's
+/// components readily available, so we can point at the right place in diagnostics.
+#[derive(Debug, Clone, Copy)]
+pub struct MethodCallComponents<'tcx> {
+ pub receiver: &'tcx hir::Expr<'tcx>,
+ pub args: &'tcx [hir::Expr<'tcx>],
+ pub full_expr: &'tcx hir::Expr<'tcx>,
+}
+
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn is_fn_ty(&self, ty: Ty<'tcx>, span: Span) -> bool {
let tcx = self.tcx;
@@ -92,7 +103,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
span,
self.body_id,
self.param_env,
- poly_trait_ref.without_const(),
+ poly_trait_ref,
);
self.predicate_may_hold(&obligation)
})
@@ -113,7 +124,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
item_name: Ident,
source: SelfSource<'tcx>,
error: MethodError<'tcx>,
- args: Option<(&'tcx hir::Expr<'tcx>, &'tcx [hir::Expr<'tcx>])>,
+ args: Option<MethodCallComponents<'tcx>>,
expected: Expectation<'tcx>,
trait_missing_method: bool,
) -> Option<DiagnosticBuilder<'_, ErrorGuaranteed>> {
@@ -151,7 +162,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
E0034,
"multiple applicable items in scope"
);
- err.span_label(item_name.span, format!("multiple `{}` found", item_name));
+ err.span_label(item_name.span, format!("multiple `{item_name}` found"));
self.note_candidates_on_method_error(
rcvr_ty,
@@ -175,13 +186,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
kind,
item_name
);
- err.span_label(item_name.span, format!("private {}", kind));
+ err.span_label(item_name.span, format!("private {kind}"));
let sp = self
.tcx
.hir()
.span_if_local(def_id)
.unwrap_or_else(|| self.tcx.def_span(def_id));
- err.span_label(sp, format!("private {} defined here", kind));
+ err.span_label(sp, format!("private {kind} defined here"));
self.suggest_valid_traits(&mut err, out_of_scope_traits);
err.emit();
}
@@ -216,7 +227,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
*region,
ty::TypeAndMut { ty: *t_type, mutbl: mutability.invert() },
);
- let msg = format!("you need `{}` instead of `{}`", trait_type, rcvr_ty);
+ let msg = format!("you need `{trait_type}` instead of `{rcvr_ty}`");
let mut kind = &self_expr.kind;
while let hir::ExprKind::AddrOf(_, _, expr)
| hir::ExprKind::Unary(hir::UnOp::Deref, expr) = kind
@@ -255,18 +266,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn suggest_missing_writer(
&self,
rcvr_ty: Ty<'tcx>,
- args: (&'tcx hir::Expr<'tcx>, &'tcx [hir::Expr<'tcx>]),
+ args: MethodCallComponents<'tcx>,
) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
let (ty_str, _ty_file) = self.tcx.short_ty_string(rcvr_ty);
- let mut err =
- struct_span_err!(self.tcx.sess, args.0.span, E0599, "cannot write into `{}`", ty_str);
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ args.receiver.span,
+ E0599,
+ "cannot write into `{}`",
+ ty_str
+ );
err.span_note(
- args.0.span,
+ args.receiver.span,
"must implement `io::Write`, `fmt::Write`, or have a `write_fmt` method",
);
- if let ExprKind::Lit(_) = args.0.kind {
+ if let ExprKind::Lit(_) = args.receiver.kind {
err.span_help(
- args.0.span.shrink_to_lo(),
+ args.receiver.span.shrink_to_lo(),
"a writer is needed before this format string",
);
};
@@ -280,7 +296,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
rcvr_ty: Ty<'tcx>,
item_name: Ident,
source: SelfSource<'tcx>,
- args: Option<(&'tcx hir::Expr<'tcx>, &'tcx [hir::Expr<'tcx>])>,
+ args: Option<MethodCallComponents<'tcx>>,
sugg_span: Span,
no_match_data: &mut NoMatchData<'tcx>,
expected: Expectation<'tcx>,
@@ -536,11 +552,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
));
}
} else if !unsatisfied_predicates.is_empty() {
- let mut type_params = FxHashMap::default();
+ let mut type_params = FxIndexMap::default();
// Pick out the list of unimplemented traits on the receiver.
// This is used for custom error messages with the `#[rustc_on_unimplemented]` attribute.
- let mut unimplemented_traits = FxHashMap::default();
+ let mut unimplemented_traits = FxIndexMap::default();
let mut unimplemented_traits_only = true;
for (predicate, _parent_pred, cause) in unsatisfied_predicates {
if let (ty::PredicateKind::Clause(ty::ClauseKind::Trait(p)), Some(cause)) =
@@ -606,7 +622,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
type_params
.entry(key)
- .or_insert_with(FxHashSet::default)
+ .or_insert_with(UnordSet::default)
.insert(obligation.to_owned());
return true;
}
@@ -635,7 +651,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
// Point at the closure that couldn't satisfy the bound.
ty::Closure(def_id, _) => bound_spans
- .push((tcx.def_span(*def_id), format!("doesn't satisfy `{}`", quiet))),
+ .push((tcx.def_span(*def_id), format!("doesn't satisfy `{quiet}`"))),
_ => {}
}
};
@@ -647,17 +663,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// `<Foo as Iterator>::Item = String`.
let projection_ty = pred.skip_binder().projection_ty;
- let substs_with_infer_self = tcx.mk_substs_from_iter(
+ let args_with_infer_self = tcx.mk_args_from_iter(
iter::once(Ty::new_var(tcx, ty::TyVid::from_u32(0)).into())
- .chain(projection_ty.substs.iter().skip(1)),
+ .chain(projection_ty.args.iter().skip(1)),
);
let quiet_projection_ty =
- tcx.mk_alias_ty(projection_ty.def_id, substs_with_infer_self);
+ tcx.mk_alias_ty(projection_ty.def_id, args_with_infer_self);
let term = pred.skip_binder().term;
- let obligation = format!("{} = {}", projection_ty, term);
+ let obligation = format!("{projection_ty} = {term}");
let quiet = with_forced_trimmed_paths!(format!(
"{} = {}",
quiet_projection_ty, term
@@ -670,7 +686,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let p = poly_trait_ref.trait_ref;
let self_ty = p.self_ty();
let path = p.print_only_trait_path();
- let obligation = format!("{}: {}", self_ty, path);
+ let obligation = format!("{self_ty}: {path}");
let quiet = with_forced_trimmed_paths!(format!("_: {}", path));
bound_span_label(self_ty, &obligation, &quiet);
Some((obligation, self_ty))
@@ -680,8 +696,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
// Find all the requirements that come from a local `impl` block.
- let mut skip_list: FxHashSet<_> = Default::default();
- let mut spanned_predicates = FxHashMap::default();
+ let mut skip_list: UnordSet<_> = Default::default();
+ let mut spanned_predicates = FxIndexMap::default();
for (p, parent_p, cause) in unsatisfied_predicates {
// Extract the predicate span and parent def id of the cause,
// if we have one.
@@ -723,7 +739,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let span = self_ty.span.ctxt().outer_expn_data().call_site;
let entry = spanned_predicates.entry(span);
let entry = entry.or_insert_with(|| {
- (FxHashSet::default(), FxHashSet::default(), Vec::new())
+ (FxIndexSet::default(), FxIndexSet::default(), Vec::new())
});
entry.0.insert(span);
entry.1.insert((
@@ -771,7 +787,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
skip_list.insert(p);
let entry = spanned_predicates.entry(self_ty.span);
let entry = entry.or_insert_with(|| {
- (FxHashSet::default(), FxHashSet::default(), Vec::new())
+ (FxIndexSet::default(), FxIndexSet::default(), Vec::new())
});
entry.2.push(p);
if cause_span != *item_span {
@@ -806,7 +822,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
skip_list.insert(p);
let entry = spanned_predicates.entry(ident.span);
let entry = entry.or_insert_with(|| {
- (FxHashSet::default(), FxHashSet::default(), Vec::new())
+ (FxIndexSet::default(), FxIndexSet::default(), Vec::new())
});
entry.0.insert(cause_span);
entry.1.insert((ident.span, ""));
@@ -823,12 +839,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut preds: Vec<_> = predicates
.iter()
.filter_map(|pred| format_pred(**pred))
- .map(|(p, _)| format!("`{}`", p))
+ .map(|(p, _)| format!("`{p}`"))
.collect();
preds.sort();
preds.dedup();
let msg = if let [pred] = &preds[..] {
- format!("trait bound {} was not satisfied", pred)
+ format!("trait bound {pred} was not satisfied")
} else {
format!("the following trait bounds were not satisfied:\n{}", preds.join("\n"),)
};
@@ -840,7 +856,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
unsatisfied_bounds = true;
}
- let mut suggested_bounds = FxHashSet::default();
+ let mut suggested_bounds = UnordSet::default();
// The requirements that didn't have an `impl` span to show.
let mut bound_list = unsatisfied_predicates
.iter()
@@ -873,7 +889,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
suggested_bounds.insert(pred);
}
}
- format!("`{}`\nwhich is required by `{}`", p, parent_p)
+ format!("`{p}`\nwhich is required by `{parent_p}`")
}
},
},
@@ -889,8 +905,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
for ((span, add_where_or_comma), obligations) in type_params.into_iter() {
restrict_type_params = true;
// #74886: Sort here so that the output is always the same.
- let mut obligations = obligations.into_iter().collect::<Vec<_>>();
- obligations.sort();
+ let obligations = obligations.to_sorted_stable_ord();
err.span_suggestion_verbose(
span,
format!(
@@ -952,6 +967,39 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
unsatisfied_bounds = true;
}
+ } else if let ty::Adt(def, targs) = rcvr_ty.kind() && let Some(args) = args {
+ // This is useful for methods on arbitrary self types that might have a simple
+ // mutability difference, like calling a method on `Pin<&mut Self>` that is only
+ // defined on `Pin<&Self>`.
+ if targs.len() == 1 {
+ let mut item_segment = hir::PathSegment::invalid();
+ item_segment.ident = item_name;
+ for t in [Ty::new_mut_ref, Ty::new_imm_ref, |_, _, t| t] {
+ let new_args = tcx.mk_args_from_iter(
+ targs
+ .iter()
+ .map(|arg| match arg.as_type() {
+ Some(ty) => ty::GenericArg::from(
+ t(tcx, tcx.lifetimes.re_erased, ty.peel_refs()),
+ ),
+ _ => arg,
+ })
+ );
+ let rcvr_ty = Ty::new_adt(tcx, *def, new_args);
+ if let Ok(method) = self.lookup_method_for_diagnostic(
+ rcvr_ty,
+ &item_segment,
+ span,
+ args.full_expr,
+ args.receiver,
+ ) {
+ err.span_note(
+ tcx.def_span(method.def_id),
+ format!("{item_kind} is available for `{rcvr_ty}`"),
+ );
+ }
+ }
+ }
}
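A hedged, user-facing sketch (invented names) of the case the new `ty::Adt` branch above targets: a method defined only for `Pin<&Self>` is called on a `Pin<&mut Self>` receiver, and the diagnostic now notes that the method is available for the other mutability.

```rust
use std::pin::Pin;

struct S;

impl S {
    fn frob(self: Pin<&Self>) {}
}

fn call(p: Pin<&mut S>) {
    // `p.frob()` would fail with E0599 since `frob` is only defined for `Pin<&S>`;
    // the branch above permutes the single generic argument's mutability and notes
    // that the method is available for `Pin<&S>`. Reborrowing makes the call work:
    p.as_ref().frob();
}

fn main() {
    let mut s = S;
    call(Pin::new(&mut s));
}
```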
let label_span_not_found = |err: &mut Diagnostic| {
@@ -993,9 +1041,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// different from the received one
// So we avoid suggesting a method with `Box<Self>`,
// for instance.
- self.tcx.at(span).type_of(*def_id).subst_identity()
+ self.tcx.at(span).type_of(*def_id).instantiate_identity()
!= rcvr_ty
- && self.tcx.at(span).type_of(*def_id).subst_identity()
+ && self
+ .tcx
+ .at(span)
+ .type_of(*def_id)
+ .instantiate_identity()
!= rcvr_ty
}
(Mode::Path, false, _) => true,
@@ -1018,7 +1070,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.map(|impl_item| {
format!(
"- `{}`",
- self.tcx.at(span).type_of(*impl_item).subst_identity()
+ self.tcx.at(span).type_of(*impl_item).instantiate_identity()
)
})
.collect::<Vec<_>>()
@@ -1029,9 +1081,30 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
"".to_string()
};
err.note(format!(
- "the {item_kind} was found for\n{}{}",
- type_candidates, additional_types
+ "the {item_kind} was found for\n{type_candidates}{additional_types}"
));
+ } else {
+ 'outer: for inherent_impl_did in self.tcx.inherent_impls(adt.did()) {
+ for inherent_method in
+ self.tcx.associated_items(inherent_impl_did).in_definition_order()
+ {
+ if let Some(attr) = self.tcx.get_attr(inherent_method.def_id, sym::rustc_confusables)
+ && let Some(candidates) = parse_confusables(attr)
+ && candidates.contains(&item_name.name)
+ {
+ err.span_suggestion_verbose(
+ item_name.span,
+ format!(
+ "you might have meant to use `{}`",
+ inherent_method.name.as_str()
+ ),
+ inherent_method.name.as_str(),
+ Applicability::MaybeIncorrect,
+ );
+ break 'outer;
+ }
+ }
+ }
}
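A sketch of the `rustc_confusables` path added above, assuming a nightly compiler: the attribute is compiler-internal (intended for the standard library and gated behind `rustc_attrs`), and the type and method names here are invented.

```rust
#![feature(rustc_attrs)]

struct Stack(Vec<u32>);

impl Stack {
    #[rustc_confusables("append", "put")]
    fn push(&mut self, x: u32) {
        self.0.push(x)
    }
}

fn main() {
    let mut s = Stack(Vec::new());
    s.push(1);
    // Calling `s.append(2)` would be an E0599 error; with the loop above, the
    // diagnostic adds "you might have meant to use `push`" because `append` is
    // listed in the method's `#[rustc_confusables]` attribute.
    // s.append(2);
}
```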
}
} else {
@@ -1085,7 +1158,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
span,
rcvr_ty,
item_name,
- args.map(|(_, args)| args.len() + 1),
+ args.map(|MethodCallComponents { args, .. }| args.len() + 1),
source,
no_match_data.out_of_scope_traits.clone(),
&unsatisfied_predicates,
@@ -1166,7 +1239,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
rcvr_ty: Ty<'tcx>,
item_name: Ident,
- args: Option<(&'tcx hir::Expr<'tcx>, &'tcx [hir::Expr<'tcx>])>,
+ args: Option<MethodCallComponents<'tcx>>,
span: Span,
err: &mut Diagnostic,
sources: &mut Vec<CandidateSource>,
@@ -1197,7 +1270,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
None
};
- let impl_ty = self.tcx.at(span).type_of(impl_did).subst_identity();
+ let impl_ty = self.tcx.at(span).type_of(impl_did).instantiate_identity();
let insertion = match self.tcx.impl_trait_ref(impl_did) {
None => String::new(),
@@ -1222,8 +1295,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else {
(
format!(
- "the candidate is defined in an impl{} for the type `{}`",
- insertion, impl_ty,
+ "the candidate is defined in an impl{insertion} for the type `{impl_ty}`",
),
None,
)
@@ -1243,7 +1315,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty::AssocKind::Fn => self
.tcx
.fn_sig(item.def_id)
- .subst_identity()
+ .instantiate_identity()
.inputs()
.skip_binder()
.get(0)
@@ -1318,7 +1390,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
rcvr_ty: Ty<'tcx>,
source: SelfSource<'tcx>,
item_name: Ident,
- args: Option<(&hir::Expr<'tcx>, &[hir::Expr<'tcx>])>,
+ args: Option<MethodCallComponents<'tcx>>,
sugg_span: Span,
) {
let mut has_unsuggestable_args = false;
@@ -1326,7 +1398,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// When the "method" is resolved through dereferencing, we really want the
// original type that has the associated function for accurate suggestions.
// (#61411)
- let impl_ty = self.tcx.type_of(*impl_did).subst_identity();
+ let impl_ty = self.tcx.type_of(*impl_did).instantiate_identity();
let target_ty = self
.autoderef(sugg_span, rcvr_ty)
.find(|(rcvr_ty, _)| {
@@ -1335,10 +1407,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
})
.map_or(impl_ty, |(ty, _)| ty)
.peel_refs();
- if let ty::Adt(def, substs) = target_ty.kind() {
+ if let ty::Adt(def, args) = target_ty.kind() {
// If there are any inferred arguments, (`{integer}`), we should replace
// them with underscores to allow the compiler to infer them
- let infer_substs = self.tcx.mk_substs_from_iter(substs.into_iter().map(|arg| {
+ let infer_args = self.tcx.mk_args_from_iter(args.into_iter().map(|arg| {
if !arg.is_suggestable(self.tcx, true) {
has_unsuggestable_args = true;
match arg.unpack() {
@@ -1368,7 +1440,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}));
- self.tcx.value_path_str_with_substs(def.did(), infer_substs)
+ self.tcx.value_path_str_with_args(def.did(), infer_args)
} else {
self.ty_to_value_string(target_ty)
}
@@ -1380,7 +1452,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&& let Some(assoc) = self.associated_value(*impl_did, item_name)
&& assoc.kind == ty::AssocKind::Fn
{
- let sig = self.tcx.fn_sig(assoc.def_id).subst_identity();
+ let sig = self.tcx.fn_sig(assoc.def_id).instantiate_identity();
sig.inputs().skip_binder().get(0).and_then(|first| if first.peel_refs() == rcvr_ty.peel_refs() {
None
} else {
@@ -1390,7 +1462,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
None
};
let mut applicability = Applicability::MachineApplicable;
- let args = if let Some((receiver, args)) = args {
+ let args = if let Some(MethodCallComponents { receiver, args, .. }) = args {
// The first arg is the same kind as the receiver
let explicit_args = if first_arg.is_some() {
std::iter::once(receiver).chain(args.iter()).collect::<Vec<_>>()
@@ -1425,11 +1497,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.span_suggestion(
sugg_span,
"use associated function syntax instead",
- format!("{}::{}{}", ty_str, item_name, args),
+ format!("{ty_str}::{item_name}{args}"),
applicability,
);
} else {
- err.help(format!("try with `{}::{}`", ty_str, item_name,));
+ err.help(format!("try with `{ty_str}::{item_name}`",));
}
}
@@ -1445,11 +1517,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
) -> bool {
let tcx = self.tcx;
let field_receiver = self.autoderef(span, rcvr_ty).find_map(|(ty, _)| match ty.kind() {
- ty::Adt(def, substs) if !def.is_enum() => {
+ ty::Adt(def, args) if !def.is_enum() => {
let variant = &def.non_enum_variant();
tcx.find_field_index(item_name, variant).map(|index| {
let field = &variant.fields[index];
- let field_ty = field.ty(tcx, substs);
+ let field_ty = field.ty(tcx, args);
(field, field_ty)
})
}
@@ -1464,9 +1536,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let expr_span = expr.span.to(item_name.span);
err.multipart_suggestion(
format!(
- "to call the function stored in `{}`, \
+ "to call the function stored in `{item_name}`, \
surround the field access with parentheses",
- item_name,
),
vec![
(expr_span.shrink_to_lo(), '('.to_string()),
@@ -1489,7 +1560,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
let field_kind = if is_accessible { "field" } else { "private field" };
- err.span_label(item_name.span, format!("{}, not a method", field_kind));
+ err.span_label(item_name.span, format!("{field_kind}, not a method"));
return true;
}
false
@@ -1546,7 +1617,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
let range_def_id = self.tcx.require_lang_item(lang_item.unwrap(), None);
- let range_ty = self.tcx.type_of(range_def_id).subst(self.tcx, &[actual.into()]);
+ let range_ty =
+ self.tcx.type_of(range_def_id).instantiate(self.tcx, &[actual.into()]);
let pick = self.lookup_probe_for_diagnostic(
item_name,
@@ -1609,7 +1681,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
|| found_assoc(tcx.types.u64)
|| found_assoc(tcx.types.u128)
|| found_assoc(tcx.types.f32)
- || found_assoc(tcx.types.f32);
+ || found_assoc(tcx.types.f64);
if found_candidate
&& actual.is_numeric()
&& !actual.has_concrete_skeleton()
@@ -1641,8 +1713,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
lit.span,
format!(
"you must specify a concrete type for this numeric value, \
- like `{}`",
- concrete_type
+ like `{concrete_type}`"
),
format!("{snippet}_{concrete_type}"),
Applicability::MaybeIncorrect,
@@ -1657,8 +1728,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let parent_node =
self.tcx.hir().get_parent(hir_id);
let msg = format!(
- "you must specify a type for this binding, like `{}`",
- concrete_type,
+ "you must specify a type for this binding, like `{concrete_type}`",
);
match (filename, parent_node) {
@@ -1699,10 +1769,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// we try to suggest `rect.area()`
pub(crate) fn suggest_assoc_method_call(&self, segs: &[PathSegment<'_>]) {
debug!("suggest_assoc_method_call segs: {:?}", segs);
- let [seg1, seg2] = segs else { return; };
+ let [seg1, seg2] = segs else {
+ return;
+ };
let Some(mut diag) =
- self.tcx.sess.diagnostic().steal_diagnostic(seg1.ident.span, StashKey::CallAssocMethod)
- else { return };
+ self.tcx.sess.diagnostic().steal_diagnostic(seg1.ident.span, StashKey::CallAssocMethod)
+ else {
+ return;
+ };
let map = self.infcx.tcx.hir();
let body_id = self.tcx.hir().body_owned_by(self.body_id);
@@ -1766,7 +1840,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
) {
if let SelfSource::MethodCall(expr) = source
&& let mod_id = self.tcx.parent_module(expr.hir_id).to_def_id()
- && let Some((fields, substs)) =
+ && let Some((fields, args)) =
self.get_field_candidates_considering_privacy(span, actual, mod_id)
{
let call_expr = self.tcx.hir().expect_expr(self.tcx.hir().parent_id(expr.hir_id));
@@ -1801,7 +1875,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
})
},
candidate_field,
- substs,
+ args,
vec![],
mod_id,
)
@@ -1839,18 +1913,24 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
item_name: Ident,
) {
let tcx = self.tcx;
- let SelfSource::MethodCall(expr) = source else { return; };
+ let SelfSource::MethodCall(expr) = source else {
+ return;
+ };
let call_expr = tcx.hir().expect_expr(tcx.hir().parent_id(expr.hir_id));
- let ty::Adt(kind, substs) = actual.kind() else { return; };
+ let ty::Adt(kind, args) = actual.kind() else {
+ return;
+ };
match kind.adt_kind() {
ty::AdtKind::Enum => {
let matching_variants: Vec<_> = kind
.variants()
.iter()
.flat_map(|variant| {
- let [field] = &variant.fields.raw[..] else { return None; };
- let field_ty = field.ty(tcx, substs);
+ let [field] = &variant.fields.raw[..] else {
+ return None;
+ };
+ let field_ty = field.ty(tcx, args);
// Skip `_`, since that'll just lead to ambiguity.
if self.resolve_vars_if_possible(field_ty).is_ty_var() {
@@ -1885,7 +1965,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match &matching_variants[..] {
[(_, field, pick)] => {
- let self_ty = field.ty(tcx, substs);
+ let self_ty = field.ty(tcx, args);
err.span_note(
tcx.def_span(pick.item.def_id),
format!("the method `{item_name}` exists on the type `{self_ty}`"),
@@ -1927,15 +2007,21 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Target wrapper types - types that wrap or pretend to wrap another type,
// perhaps this inner type is meant to be called?
ty::AdtKind::Struct | ty::AdtKind::Union => {
- let [first] = ***substs else { return; };
- let ty::GenericArgKind::Type(ty) = first.unpack() else { return; };
+ let [first] = ***args else {
+ return;
+ };
+ let ty::GenericArgKind::Type(ty) = first.unpack() else {
+ return;
+ };
let Ok(pick) = self.lookup_probe_for_diagnostic(
item_name,
ty,
call_expr,
ProbeScope::TraitsInScope,
None,
- ) else { return; };
+ ) else {
+ return;
+ };
let name = self.ty_to_value_string(actual);
let inner_id = kind.did();
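For context, a runnable sketch of the wrapper-type case handled above, where the single generic argument of a struct such as `Mutex<T>` is probed for the missing method:

```rust
use std::sync::Mutex;

fn main() {
    let data = Mutex::new(Vec::new());
    // `data.push(1)` is an E0599 error: `push` is not a method of `Mutex<Vec<i32>>`.
    // The branch above probes the single generic argument (`Vec<i32>`), so the
    // diagnostic can point at the inner type; the working call goes through the guard:
    data.lock().unwrap().push(1);
}
```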
@@ -2037,7 +2123,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty::Adt(def, _) => Some(def.did()),
_ => None,
})
- .collect::<FxHashSet<_>>();
+ .collect::<FxIndexSet<_>>();
let mut spans: MultiSpan = def_ids
.iter()
.filter_map(|def_id| {
@@ -2100,7 +2186,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let Some(ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_pred))) =
pred.kind().no_bound_vars()
else {
- continue
+ continue;
};
let adt = match trait_pred.self_ty().ty_adt_def() {
Some(adt) if adt.did().is_local() => adt,
@@ -2150,7 +2236,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if let Some((last_self_name, _, ref mut last_trait_names)) = derives_grouped.last_mut()
{
if last_self_name == &self_name {
- last_trait_names.push_str(format!(", {}", trait_name).as_str());
+ last_trait_names.push_str(format!(", {trait_name}").as_str());
continue;
}
}
@@ -2182,8 +2268,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
for (self_name, self_span, traits) in &derives_grouped {
err.span_suggestion_verbose(
self_span.shrink_to_lo(),
- format!("consider annotating `{}` with `#[derive({})]`", self_name, traits),
- format!("#[derive({})]\n", traits),
+ format!("consider annotating `{self_name}` with `#[derive({traits})]`"),
+ format!("#[derive({traits})]\n"),
Applicability::MaybeIncorrect,
);
}
@@ -2197,7 +2283,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
item_name: Ident,
expected: Expectation<'tcx>,
) {
- let SelfSource::QPath(ty) = self_source else { return; };
+ let SelfSource::QPath(ty) = self_source else {
+ return;
+ };
for (deref_ty, _) in self.autoderef(rustc_span::DUMMY_SP, rcvr_ty).skip(1) {
if let Ok(pick) = self.probe_for_name(
Mode::Path,
@@ -2214,7 +2302,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// just changing the path.
&& pick.item.fn_has_self_parameter
&& let Some(self_ty) =
- self.tcx.fn_sig(pick.item.def_id).subst_identity().inputs().skip_binder().get(0)
+ self.tcx.fn_sig(pick.item.def_id).instantiate_identity().inputs().skip_binder().get(0)
&& self_ty.is_ref()
{
let suggested_path = match deref_ty.kind() {
@@ -2255,7 +2343,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// Print out the type for use in value namespace.
fn ty_to_value_string(&self, ty: Ty<'tcx>) -> String {
match ty.kind() {
- ty::Adt(def, substs) => self.tcx.def_path_str_with_substs(def.did(), substs),
+ ty::Adt(def, args) => self.tcx.def_path_str_with_args(def.did(), args),
_ => self.ty_to_string(ty),
}
}
@@ -2429,7 +2517,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if pick.autoderefs == 0 && !skip {
err.span_label(
pick.item.ident(self.tcx).span,
- format!("the method is available for `{}` here", rcvr_ty),
+ format!("the method is available for `{rcvr_ty}` here"),
);
}
break;
@@ -2475,13 +2563,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if pick.autoderefs == 0 && !skip {
err.span_label(
pick.item.ident(self.tcx).span,
- format!("the method is available for `{}` here", new_rcvr_t),
+ format!("the method is available for `{new_rcvr_t}` here"),
);
err.multipart_suggestion(
"consider wrapping the receiver expression with the \
appropriate type",
vec![
- (rcvr.span.shrink_to_lo(), format!("{}({}", pre, post)),
+ (rcvr.span.shrink_to_lo(), format!("{pre}({post}")),
(rcvr.span.shrink_to_hi(), ")".to_string()),
],
Applicability::MaybeIncorrect,
@@ -2651,7 +2739,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Nothing,
}
let ast_generics = hir.get_generics(id.owner.def_id).unwrap();
- let trait_def_ids: FxHashSet<DefId> = ast_generics
+ let trait_def_ids: DefIdSet = ast_generics
.bounds_for_param(def_id)
.flat_map(|bp| bp.bounds.iter())
.filter_map(|bound| bound.trait_ref()?.trait_def_id())
@@ -2721,7 +2809,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
err.span_suggestions(
sp,
- message(format!("add {} supertrait for", article)),
+ message(format!("add {article} supertrait for")),
candidates.iter().map(|t| {
format!("{} {}", sep, self.tcx.def_path_str(t.def_id),)
}),
@@ -2752,7 +2840,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.tcx.impl_polarity(*imp_did) == ty::ImplPolarity::Negative
})
.any(|imp_did| {
- let imp = self.tcx.impl_trait_ref(imp_did).unwrap().subst_identity();
+ let imp =
+ self.tcx.impl_trait_ref(imp_did).unwrap().instantiate_identity();
let imp_simp =
simplify_type(self.tcx, imp.self_ty(), TreatParams::ForLookup);
imp_simp.is_some_and(|s| s == simp_rcvr_ty)
@@ -2789,7 +2878,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
trait_infos => {
let mut msg = message(param_type.map_or_else(
|| "implement".to_string(), // FIXME: it might only need to be imported into scope, not implemented.
- |param| format!("restrict type parameter `{}` with", param),
+ |param| format!("restrict type parameter `{param}` with"),
));
for (i, trait_info) in trait_infos.iter().enumerate() {
msg.push_str(&format!(
@@ -2813,8 +2902,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
trait_infos => {
let mut msg = format!(
- "the following traits define an item `{}`, but are explicitly unimplemented:",
- item_name
+ "the following traits define an item `{item_name}`, but are explicitly unimplemented:"
);
for trait_info in trait_infos {
msg.push_str(&format!("\n{}", self.tcx.def_path_str(trait_info.def_id)));
@@ -2834,9 +2922,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
found: Ty<'tcx>,
expected: Ty<'tcx>,
) -> bool {
- let Some((_def_id_or_name, output, _inputs)) =
- self.extract_callable_info(found) else {
- return false;
+ let Some((_def_id_or_name, output, _inputs)) = self.extract_callable_info(found) else {
+ return false;
};
if !self.can_coerce(output, expected) {
@@ -2907,7 +2994,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// This occurs for UFCS desugaring of `T::method`, where there is no
// receiver expression for the method call, and thus no autoderef.
if let SelfSource::QPath(_) = source {
- return is_local(self.resolve_vars_with_obligations(rcvr_ty));
+ return is_local(rcvr_ty);
}
self.autoderef(span, rcvr_ty).any(|(ty, _)| is_local(ty))
@@ -2955,7 +3042,7 @@ pub fn all_traits(tcx: TyCtxt<'_>) -> Vec<TraitInfo> {
fn print_disambiguation_help<'tcx>(
item_name: Ident,
- args: Option<(&'tcx hir::Expr<'tcx>, &'tcx [hir::Expr<'tcx>])>,
+ args: Option<MethodCallComponents<'tcx>>,
err: &mut Diagnostic,
trait_name: String,
rcvr_ty: Ty<'_>,
@@ -2967,7 +3054,11 @@ fn print_disambiguation_help<'tcx>(
fn_has_self_parameter: bool,
) {
let mut applicability = Applicability::MachineApplicable;
- let (span, sugg) = if let (ty::AssocKind::Fn, Some((receiver, args))) = (kind, args) {
+ let (span, sugg) = if let (
+ ty::AssocKind::Fn,
+ Some(MethodCallComponents { receiver, args, .. }),
+ ) = (kind, args)
+ {
let args = format!(
"({}{})",
rcvr_ty.ref_mutability().map_or("", |mutbl| mutbl.ref_prefix_str()),
@@ -2981,13 +3072,13 @@ fn print_disambiguation_help<'tcx>(
.join(", "),
);
let trait_name = if !fn_has_self_parameter {
- format!("<{} as {}>", rcvr_ty, trait_name)
+ format!("<{rcvr_ty} as {trait_name}>")
} else {
trait_name
};
- (span, format!("{}::{}{}", trait_name, item_name, args))
+ (span, format!("{trait_name}::{item_name}{args}"))
} else {
- (span.with_hi(item_name.span.lo()), format!("<{} as {}>::", rcvr_ty, trait_name))
+ (span.with_hi(item_name.span.lo()), format!("<{rcvr_ty} as {trait_name}>::"))
};
err.span_suggestion_verbose(
span,
@@ -2995,7 +3086,7 @@ fn print_disambiguation_help<'tcx>(
"disambiguate the {} for {}",
def_kind_descr,
if let Some(candidate) = candidate {
- format!("candidate #{}", candidate)
+ format!("candidate #{candidate}")
} else {
"the candidate".to_string()
},
diff --git a/compiler/rustc_hir_typeck/src/op.rs b/compiler/rustc_hir_typeck/src/op.rs
index 1eae258c1..a283cd1ab 100644
--- a/compiler/rustc_hir_typeck/src/op.rs
+++ b/compiler/rustc_hir_typeck/src/op.rs
@@ -4,7 +4,7 @@ use super::method::MethodCallee;
use super::{has_expected_num_generic_args, FnCtxt};
use crate::Expectation;
use rustc_ast as ast;
-use rustc_errors::{self, struct_span_err, Applicability, Diagnostic};
+use rustc_errors::{self, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder};
use rustc_hir as hir;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::traits::ObligationCauseCode;
@@ -380,33 +380,93 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
};
- let mut suggest_deref_binop = |lhs_deref_ty: Ty<'tcx>| {
- if self
- .lookup_op_method(
- lhs_deref_ty,
- Some((rhs_expr, rhs_ty)),
- Op::Binary(op, is_assign),
- expected,
- )
- .is_ok()
- {
- let msg = format!(
- "`{}{}` can be used on `{}` if you dereference the left-hand side",
- op.node.as_str(),
- match is_assign {
- IsAssign::Yes => "=",
- IsAssign::No => "",
- },
- lhs_deref_ty,
- );
- err.span_suggestion_verbose(
- lhs_expr.span.shrink_to_lo(),
- msg,
- "*",
- rustc_errors::Applicability::MachineApplicable,
- );
- }
- };
+ let suggest_deref_binop =
+ |err: &mut DiagnosticBuilder<'_, _>, lhs_deref_ty: Ty<'tcx>| {
+ if self
+ .lookup_op_method(
+ lhs_deref_ty,
+ Some((rhs_expr, rhs_ty)),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .is_ok()
+ {
+ let msg = format!(
+ "`{}{}` can be used on `{}` if you dereference the left-hand side",
+ op.node.as_str(),
+ match is_assign {
+ IsAssign::Yes => "=",
+ IsAssign::No => "",
+ },
+ lhs_deref_ty,
+ );
+ err.span_suggestion_verbose(
+ lhs_expr.span.shrink_to_lo(),
+ msg,
+ "*",
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ }
+ };
+
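For reference, the user-facing shape of the (pre-existing, now parameterized) deref suggestion, as a standalone sketch:

```rust
fn bump(x: &mut i32) {
    // `x += 1` would fail: `+=` cannot be applied to `&mut i32`, but it can be used
    // on `i32` if the left-hand side is dereferenced, which is exactly what the
    // closure above suggests.
    *x += 1;
}

fn main() {
    let mut n = 41;
    bump(&mut n);
    assert_eq!(n, 42);
}
```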
+ let suggest_different_borrow =
+ |err: &mut DiagnosticBuilder<'_, _>,
+ lhs_adjusted_ty,
+ lhs_new_mutbl: Option<ast::Mutability>,
+ rhs_adjusted_ty,
+ rhs_new_mutbl: Option<ast::Mutability>| {
+ if self
+ .lookup_op_method(
+ lhs_adjusted_ty,
+ Some((rhs_expr, rhs_adjusted_ty)),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .is_ok()
+ {
+ let op_str = op.node.as_str();
+ err.note(format!("an implementation for `{lhs_adjusted_ty} {op_str} {rhs_adjusted_ty}` exists"));
+
+ if let Some(lhs_new_mutbl) = lhs_new_mutbl
+ && let Some(rhs_new_mutbl) = rhs_new_mutbl
+ && lhs_new_mutbl.is_not()
+ && rhs_new_mutbl.is_not() {
+ err.multipart_suggestion_verbose(
+ "consider reborrowing both sides",
+ vec![
+ (lhs_expr.span.shrink_to_lo(), "&*".to_string()),
+ (rhs_expr.span.shrink_to_lo(), "&*".to_string())
+ ],
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ } else {
+ let mut suggest_new_borrow = |new_mutbl: ast::Mutability, sp: Span| {
+ // Can reborrow (&mut -> &)
+ if new_mutbl.is_not() {
+ err.span_suggestion_verbose(
+ sp.shrink_to_lo(),
+ "consider reborrowing this side",
+ "&*",
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ // The impl works on `&mut`, but we only have `&`
+ } else {
+ err.span_help(
+ sp,
+ "consider making this expression a mutable borrow",
+ );
+ }
+ };
+
+ if let Some(lhs_new_mutbl) = lhs_new_mutbl {
+ suggest_new_borrow(lhs_new_mutbl, lhs_expr.span);
+ }
+ if let Some(rhs_new_mutbl) = rhs_new_mutbl {
+ suggest_new_borrow(rhs_new_mutbl, rhs_expr.span);
+ }
+ }
+ }
+ };
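A hedged sketch (invented type) of the scenario `suggest_different_borrow` covers: the operator impl exists for `&T op &T`, but both operands are `&mut T`, so the new note and the "consider reborrowing both sides" suggestion apply.

```rust
use std::ops::Add;

struct Num(i32);

impl Add for &Num {
    type Output = Num;
    fn add(self, rhs: &Num) -> Num {
        Num(self.0 + rhs.0)
    }
}

fn sum(a: &mut Num, b: &mut Num) -> Num {
    // `a + b` would fail: there is no impl for `&mut Num + &mut Num`. The helper above
    // notes that an implementation for `&Num + &Num` exists and suggests reborrowing
    // both sides, i.e. the working form below.
    &*a + &*b
}

fn main() {
    let (mut a, mut b) = (Num(1), Num(2));
    assert_eq!(sum(&mut a, &mut b).0, 3);
}
```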
let is_compatible_after_call = |lhs_ty, rhs_ty| {
self.lookup_op_method(
@@ -429,15 +489,60 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else if is_assign == IsAssign::Yes
&& let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty)
{
- suggest_deref_binop(lhs_deref_ty);
+ suggest_deref_binop(&mut err, lhs_deref_ty);
} else if is_assign == IsAssign::No
- && let Ref(_, lhs_deref_ty, _) = lhs_ty.kind()
+ && let Ref(region, lhs_deref_ty, mutbl) = lhs_ty.kind()
{
if self.type_is_copy_modulo_regions(
self.param_env,
*lhs_deref_ty,
) {
- suggest_deref_binop(*lhs_deref_ty);
+ suggest_deref_binop(&mut err, *lhs_deref_ty);
+ } else {
+ let lhs_inv_mutbl = mutbl.invert();
+ let lhs_inv_mutbl_ty = Ty::new_ref(
+ self.tcx,
+ *region,
+ ty::TypeAndMut {
+ ty: *lhs_deref_ty,
+ mutbl: lhs_inv_mutbl,
+ },
+ );
+
+ suggest_different_borrow(
+ &mut err,
+ lhs_inv_mutbl_ty,
+ Some(lhs_inv_mutbl),
+ rhs_ty,
+ None,
+ );
+
+ if let Ref(region, rhs_deref_ty, mutbl) = rhs_ty.kind() {
+ let rhs_inv_mutbl = mutbl.invert();
+ let rhs_inv_mutbl_ty = Ty::new_ref(
+ self.tcx,
+ *region,
+ ty::TypeAndMut {
+ ty: *rhs_deref_ty,
+ mutbl: rhs_inv_mutbl,
+ },
+ );
+
+ suggest_different_borrow(
+ &mut err,
+ lhs_ty,
+ None,
+ rhs_inv_mutbl_ty,
+ Some(rhs_inv_mutbl),
+ );
+ suggest_different_borrow(
+ &mut err,
+ lhs_inv_mutbl_ty,
+ Some(lhs_inv_mutbl),
+ rhs_inv_mutbl_ty,
+ Some(rhs_inv_mutbl),
+ );
+ }
}
} else if self.suggest_fn_call(&mut err, lhs_expr, lhs_ty, |lhs_ty| {
is_compatible_after_call(lhs_ty, rhs_ty)
diff --git a/compiler/rustc_hir_typeck/src/pat.rs b/compiler/rustc_hir_typeck/src/pat.rs
index 42f4531c0..8fc236f46 100644
--- a/compiler/rustc_hir_typeck/src/pat.rs
+++ b/compiler/rustc_hir_typeck/src/pat.rs
@@ -1,3 +1,4 @@
+use crate::gather_locals::DeclOrigin;
use crate::{errors, FnCtxt, RawTy};
use rustc_ast as ast;
use rustc_data_structures::fx::FxHashMap;
@@ -77,6 +78,13 @@ struct TopInfo<'tcx> {
span: Option<Span>,
}
+#[derive(Copy, Clone)]
+struct PatInfo<'tcx, 'a> {
+ binding_mode: BindingMode,
+ top_info: TopInfo<'tcx>,
+ decl_origin: Option<DeclOrigin<'a>>,
+}
+
impl<'tcx> FnCtxt<'_, 'tcx> {
fn pattern_cause(&self, ti: TopInfo<'tcx>, cause_span: Span) -> ObligationCause<'tcx> {
let code =
@@ -135,15 +143,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
///
/// Otherwise, `Some(span)` represents the span of a type expression
/// which originated the `expected` type.
- pub fn check_pat_top(
+ pub(crate) fn check_pat_top(
&self,
pat: &'tcx Pat<'tcx>,
expected: Ty<'tcx>,
span: Option<Span>,
origin_expr: Option<&'tcx hir::Expr<'tcx>>,
+ decl_origin: Option<DeclOrigin<'tcx>>,
) {
let info = TopInfo { expected, origin_expr, span };
- self.check_pat(pat, expected, INITIAL_BM, info);
+ let pat_info = PatInfo { binding_mode: INITIAL_BM, top_info: info, decl_origin };
+ self.check_pat(pat, expected, pat_info);
}
/// Type check the given `pat` against the `expected` type
@@ -151,14 +161,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
///
/// Outside of this module, `check_pat_top` should always be used.
/// Conversely, inside this module, `check_pat_top` should never be used.
- #[instrument(level = "debug", skip(self, ti))]
- fn check_pat(
- &self,
- pat: &'tcx Pat<'tcx>,
- expected: Ty<'tcx>,
- def_bm: BindingMode,
- ti: TopInfo<'tcx>,
- ) {
+ #[instrument(level = "debug", skip(self, pat_info))]
+ fn check_pat(&self, pat: &'tcx Pat<'tcx>, expected: Ty<'tcx>, pat_info: PatInfo<'tcx, '_>) {
+ let PatInfo { binding_mode: def_bm, top_info: ti, .. } = pat_info;
let path_res = match &pat.kind {
PatKind::Path(qpath) => {
Some(self.resolve_ty_and_res_fully_qualified_call(qpath, pat.hir_id, pat.span))
@@ -167,38 +172,38 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let adjust_mode = self.calc_adjust_mode(pat, path_res.map(|(res, ..)| res));
let (expected, def_bm) = self.calc_default_binding_mode(pat, expected, def_bm, adjust_mode);
+ let pat_info =
+ PatInfo { binding_mode: def_bm, top_info: ti, decl_origin: pat_info.decl_origin };
let ty = match pat.kind {
PatKind::Wild => expected,
PatKind::Lit(lt) => self.check_pat_lit(pat.span, lt, expected, ti),
PatKind::Range(lhs, rhs, _) => self.check_pat_range(pat.span, lhs, rhs, expected, ti),
PatKind::Binding(ba, var_id, _, sub) => {
- self.check_pat_ident(pat, ba, var_id, sub, expected, def_bm, ti)
+ self.check_pat_ident(pat, ba, var_id, sub, expected, pat_info)
}
PatKind::TupleStruct(ref qpath, subpats, ddpos) => {
- self.check_pat_tuple_struct(pat, qpath, subpats, ddpos, expected, def_bm, ti)
+ self.check_pat_tuple_struct(pat, qpath, subpats, ddpos, expected, pat_info)
}
PatKind::Path(ref qpath) => {
self.check_pat_path(pat, qpath, path_res.unwrap(), expected, ti)
}
PatKind::Struct(ref qpath, fields, has_rest_pat) => {
- self.check_pat_struct(pat, qpath, fields, has_rest_pat, expected, def_bm, ti)
+ self.check_pat_struct(pat, qpath, fields, has_rest_pat, expected, pat_info)
}
PatKind::Or(pats) => {
for pat in pats {
- self.check_pat(pat, expected, def_bm, ti);
+ self.check_pat(pat, expected, pat_info);
}
expected
}
PatKind::Tuple(elements, ddpos) => {
- self.check_pat_tuple(pat.span, elements, ddpos, expected, def_bm, ti)
- }
- PatKind::Box(inner) => self.check_pat_box(pat.span, inner, expected, def_bm, ti),
- PatKind::Ref(inner, mutbl) => {
- self.check_pat_ref(pat, inner, mutbl, expected, def_bm, ti)
+ self.check_pat_tuple(pat.span, elements, ddpos, expected, pat_info)
}
+ PatKind::Box(inner) => self.check_pat_box(pat.span, inner, expected, pat_info),
+ PatKind::Ref(inner, mutbl) => self.check_pat_ref(pat, inner, mutbl, expected, pat_info),
PatKind::Slice(before, slice, after) => {
- self.check_pat_slice(pat.span, before, slice, after, expected, def_bm, ti)
+ self.check_pat_slice(pat.span, before, slice, after, expected, pat_info)
}
};
@@ -335,8 +340,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expected: Ty<'tcx>,
mut def_bm: BindingMode,
) -> (Ty<'tcx>, BindingMode) {
- let mut expected = self.resolve_vars_with_obligations(expected);
-
+ let mut expected = self.try_structurally_resolve_type(pat.span, expected);
// Peel off as many `&` or `&mut` from the scrutinee type as possible. For example,
// for `match &&&mut Some(5)` the loop runs three times, aborting when it reaches
// the `Some(5)` which is not of type Ref.
@@ -353,7 +357,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Preserve the reference type. We'll need it later during THIR lowering.
pat_adjustments.push(expected);
- expected = inner_ty;
+ expected = self.try_structurally_resolve_type(pat.span, inner_ty);
def_bm = ty::BindByReference(match def_bm {
// If default binding mode is by value, make it `ref` or `ref mut`
// (depending on whether we observe `&` or `&mut`).
@@ -517,7 +521,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn endpoint_has_type(&self, err: &mut Diagnostic, span: Span, ty: Ty<'_>) {
if !ty.references_error() {
- err.span_label(span, format!("this is of type `{}`", ty));
+ err.span_label(span, format!("this is of type `{ty}`"));
}
}
@@ -541,7 +545,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
let msg = |ty| {
let ty = self.resolve_vars_if_possible(ty);
- format!("this is of type `{}` but it should be `char` or numeric", ty)
+ format!("this is of type `{ty}` but it should be `char` or numeric")
};
let mut one_side_err = |first_span, first_ty, second: Option<(bool, Ty<'tcx>, Span)>| {
err.span_label(first_span, msg(first_ty));
@@ -581,9 +585,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
var_id: HirId,
sub: Option<&'tcx Pat<'tcx>>,
expected: Ty<'tcx>,
- def_bm: BindingMode,
- ti: TopInfo<'tcx>,
+ pat_info: PatInfo<'tcx, '_>,
) -> Ty<'tcx> {
+ let PatInfo { binding_mode: def_bm, top_info: ti, .. } = pat_info;
+
// Determine the binding mode...
let bm = match ba {
hir::BindingAnnotation::NONE => def_bm,
@@ -621,12 +626,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
if let Some(p) = sub {
- self.check_pat(p, expected, def_bm, ti);
+ self.check_pat(p, expected, pat_info);
}
local_ty
}
+ /// When a variable is bound several times in a `PatKind::Or`, it'll resolve all of the
+ /// subsequent bindings of the same name to the first usage. Verify that all of these
+ /// bindings have the same type by comparing them all against the type of that first pat.
fn check_binding_alt_eq_ty(
&self,
ba: hir::BindingAnnotation,
@@ -638,7 +646,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let var_ty = self.local_ty(span, var_id);
if let Some(mut err) = self.demand_eqtype_pat_diag(span, var_ty, ty, ti) {
let hir = self.tcx.hir();
- let var_ty = self.resolve_vars_with_obligations(var_ty);
+ let var_ty = self.resolve_vars_if_possible(var_ty);
let msg = format!("first introduced with type `{var_ty}` here");
err.span_label(hir.span(var_id), msg);
let in_match = hir.parent_iter(var_id).any(|(_, n)| {
@@ -651,12 +659,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
)
});
let pre = if in_match { "in the same arm, " } else { "" };
- err.note(format!("{}a binding must have the same type in all alternatives", pre));
+ err.note(format!("{pre}a binding must have the same type in all alternatives"));
self.suggest_adding_missing_ref_or_removing_ref(
&mut err,
span,
var_ty,
- self.resolve_vars_with_obligations(ty),
+ self.resolve_vars_if_possible(ty),
ba,
);
err.emit();
@@ -753,7 +761,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match binding_parent {
// Check that there is an explicit type (i.e. this is not a closure param with an inferred type)
// so we don't suggest moving something to a type that does not exist
- hir::Node::Param(hir::Param { ty_span, .. }) if binding.span != *ty_span => {
+ hir::Node::Param(hir::Param { ty_span, pat, .. }) if pat.span != *ty_span => {
err.multipart_suggestion_verbose(
format!("to take parameter `{binding}` by reference, move `&{mutability}` to the type"),
vec![
@@ -841,8 +849,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fields: &'tcx [hir::PatField<'tcx>],
has_rest_pat: bool,
expected: Ty<'tcx>,
- def_bm: BindingMode,
- ti: TopInfo<'tcx>,
+ pat_info: PatInfo<'tcx, '_>,
) -> Ty<'tcx> {
// Resolve the path and check the definition for errors.
let (variant, pat_ty) = match self.check_struct_path(qpath, pat.hir_id) {
@@ -850,18 +857,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Err(guar) => {
let err = Ty::new_error(self.tcx, guar);
for field in fields {
- let ti = ti;
- self.check_pat(field.pat, err, def_bm, ti);
+ self.check_pat(field.pat, err, pat_info);
}
return err;
}
};
// Type-check the path.
- self.demand_eqtype_pat(pat.span, expected, pat_ty, ti);
+ self.demand_eqtype_pat(pat.span, expected, pat_ty, pat_info.top_info);
// Type-check subpatterns.
- if self.check_struct_pat_fields(pat_ty, &pat, variant, fields, has_rest_pat, def_bm, ti) {
+ if self.check_struct_pat_fields(pat_ty, &pat, variant, fields, has_rest_pat, pat_info) {
pat_ty
} else {
Ty::new_misc_error(self.tcx)
@@ -922,7 +928,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match opt_def_id {
Some(def_id) => match self.tcx.hir().get_if_local(def_id) {
Some(hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Const(_, body_id), ..
+ kind: hir::ItemKind::Const(_, _, body_id),
+ ..
})) => match self.tcx.hir().get(body_id.hir_id) {
hir::Node::Expr(expr) => {
if hir::is_range_literal(expr) {
@@ -1026,13 +1033,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
subpats: &'tcx [Pat<'tcx>],
ddpos: hir::DotDotPos,
expected: Ty<'tcx>,
- def_bm: BindingMode,
- ti: TopInfo<'tcx>,
+ pat_info: PatInfo<'tcx, '_>,
) -> Ty<'tcx> {
+ let PatInfo { binding_mode: def_bm, top_info: ti, decl_origin } = pat_info;
let tcx = self.tcx;
let on_error = |e| {
for pat in subpats {
- self.check_pat(pat, Ty::new_error(tcx, e), def_bm, ti);
+ self.check_pat(
+ pat,
+ Ty::new_error(tcx, e),
+ PatInfo { binding_mode: def_bm, top_info: ti, decl_origin },
+ );
}
};
let report_unexpected_res = |res: Res| {
@@ -1092,13 +1103,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if subpats.len() == variant.fields.len()
|| subpats.len() < variant.fields.len() && ddpos.as_opt_usize().is_some()
{
- let ty::Adt(_, substs) = pat_ty.kind() else {
+ let ty::Adt(_, args) = pat_ty.kind() else {
bug!("unexpected pattern type {:?}", pat_ty);
};
for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) {
let field = &variant.fields[FieldIdx::from_usize(i)];
- let field_ty = self.field_ty(subpat.span, field, substs);
- self.check_pat(subpat, field_ty, def_bm, ti);
+ let field_ty = self.field_ty(subpat.span, field, args);
+ self.check_pat(
+ subpat,
+ field_ty,
+ PatInfo { binding_mode: def_bm, top_info: ti, decl_origin },
+ );
self.tcx.check_stability(
variant.fields[FieldIdx::from_usize(i)].did,
@@ -1180,10 +1195,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// with the subpatterns directly in the tuple variant pattern, e.g., `V_i(p_0, .., p_N)`.
let missing_parentheses = match (&expected.kind(), fields, had_err) {
// #67037: only do this if we could successfully type-check the expected type against
- // the tuple struct pattern. Otherwise the substs could get out of range on e.g.,
+ // the tuple struct pattern. Otherwise the args could get out of range on e.g.,
// `let P() = U;` where `P != U` with `struct P<T>(T);`.
- (ty::Adt(_, substs), [field], false) => {
- let field_ty = self.field_ty(pat_span, field, substs);
+ (ty::Adt(_, args), [field], false) => {
+ let field_ty = self.field_ty(pat_span, field, args);
match field_ty.kind() {
ty::Tuple(fields) => fields.len() == subpats.len(),
_ => false,
@@ -1282,8 +1297,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
elements: &'tcx [Pat<'tcx>],
ddpos: hir::DotDotPos,
expected: Ty<'tcx>,
- def_bm: BindingMode,
- ti: TopInfo<'tcx>,
+ pat_info: PatInfo<'tcx, '_>,
) -> Ty<'tcx> {
let tcx = self.tcx;
let mut expected_len = elements.len();
@@ -1304,18 +1318,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
});
let element_tys = tcx.mk_type_list_from_iter(element_tys_iter);
let pat_ty = Ty::new_tup(tcx, element_tys);
- if let Some(mut err) = self.demand_eqtype_pat_diag(span, expected, pat_ty, ti) {
+ if let Some(mut err) =
+ self.demand_eqtype_pat_diag(span, expected, pat_ty, pat_info.top_info)
+ {
let reported = err.emit();
// Walk subpatterns with an expected type of `err` in this case to silence
// further errors being emitted when using the bindings. #50333
let element_tys_iter = (0..max_len).map(|_| Ty::new_error(tcx, reported));
for (_, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
- self.check_pat(elem, Ty::new_error(tcx, reported), def_bm, ti);
+ self.check_pat(elem, Ty::new_error(tcx, reported), pat_info);
}
Ty::new_tup_from_iter(tcx, element_tys_iter)
} else {
for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
- self.check_pat(elem, element_tys[i], def_bm, ti);
+ self.check_pat(elem, element_tys[i], pat_info);
}
pat_ty
}
@@ -1328,12 +1344,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
variant: &'tcx ty::VariantDef,
fields: &'tcx [hir::PatField<'tcx>],
has_rest_pat: bool,
- def_bm: BindingMode,
- ti: TopInfo<'tcx>,
+ pat_info: PatInfo<'tcx, '_>,
) -> bool {
let tcx = self.tcx;
- let ty::Adt(adt, substs) = adt_ty.kind() else {
+ let ty::Adt(adt, args) = adt_ty.kind() else {
span_bug!(pat.span, "struct pattern is not an ADT");
};
@@ -1366,7 +1381,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.map(|(i, f)| {
self.write_field_index(field.hir_id, *i);
self.tcx.check_stability(f.did, Some(pat.hir_id), span, None);
- self.field_ty(span, f, substs)
+ self.field_ty(span, f, args)
})
.unwrap_or_else(|| {
inexistent_fields.push(field);
@@ -1376,7 +1391,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
};
- self.check_pat(field.pat, field_ty, def_bm, ti);
+ self.check_pat(field.pat, field_ty, pat_info);
}
let mut unmentioned_fields = variant
@@ -1394,7 +1409,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&inexistent_fields,
&mut unmentioned_fields,
variant,
- substs,
+ args,
))
} else {
None
@@ -1564,7 +1579,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
inexistent_fields: &[&hir::PatField<'tcx>],
unmentioned_fields: &mut Vec<(&'tcx ty::FieldDef, Ident)>,
variant: &ty::VariantDef,
- substs: &'tcx ty::List<ty::subst::GenericArg<'tcx>>,
+ args: &'tcx ty::List<ty::GenericArg<'tcx>>,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let tcx = self.tcx;
let (field_names, t, plural) = if inexistent_fields.len() == 1 {
@@ -1634,7 +1649,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.field_ty(
unmentioned_fields[0].1.span,
unmentioned_fields[0].0,
- substs,
+ args,
),
) => {}
_ => {
@@ -1708,7 +1723,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.span_suggestion_verbose(
qpath.span().shrink_to_hi().to(pat.span.shrink_to_hi()),
"use the tuple variant pattern syntax instead",
- format!("({})", sugg),
+ format!("({sugg})"),
appl,
);
return Some(err);
@@ -1810,7 +1825,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
const LIMIT: usize = 3;
match witnesses {
[] => bug!(),
- [witness] => format!("`{}`", witness),
+ [witness] => format!("`{witness}`"),
[head @ .., tail] if head.len() < LIMIT => {
let head: Vec<_> = head.iter().map(<_>::to_string).collect();
format!("`{}` and `{}`", head.join("`, `"), tail)
@@ -1832,8 +1847,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
"ensure that all fields are mentioned explicitly by adding the suggested fields",
);
lint.note(format!(
- "the pattern is of type `{}` and the `non_exhaustive_omitted_patterns` attribute was found",
- ty,
+ "the pattern is of type `{ty}` and the `non_exhaustive_omitted_patterns` attribute was found",
));
lint
@@ -1862,10 +1876,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else {
let fields = unmentioned_fields
.iter()
- .map(|(_, name)| format!("`{}`", name))
+ .map(|(_, name)| format!("`{name}`"))
.collect::<Vec<String>>()
.join(", ");
- format!("fields {}{}", fields, inaccessible)
+ format!("fields {fields}{inaccessible}")
};
let mut err = struct_span_err!(
self.tcx.sess,
@@ -1874,7 +1888,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
"pattern does not mention {}",
field_names
);
- err.span_label(pat.span, format!("missing {}", field_names));
+ err.span_label(pat.span, format!("missing {field_names}"));
let len = unmentioned_fields.len();
let (prefix, postfix, sp) = match fields {
[] => match &pat.kind {
@@ -1907,11 +1921,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.iter()
.map(|(_, name)| {
let field_name = name.to_string();
- if is_number(&field_name) {
- format!("{}: _", field_name)
- } else {
- field_name
- }
+ if is_number(&field_name) { format!("{field_name}: _") } else { field_name }
})
.collect::<Vec<_>>()
.join(", "),
@@ -1928,7 +1938,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
s = pluralize!(len),
them = if len == 1 { "it" } else { "them" },
),
- format!("{}..{}", prefix, postfix),
+ format!("{prefix}..{postfix}"),
Applicability::MachineApplicable,
);
err
@@ -1939,8 +1949,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
span: Span,
inner: &'tcx Pat<'tcx>,
expected: Ty<'tcx>,
- def_bm: BindingMode,
- ti: TopInfo<'tcx>,
+ pat_info: PatInfo<'tcx, '_>,
) -> Ty<'tcx> {
let tcx = self.tcx;
let (box_ty, inner_ty) = match self.check_dereferenceable(span, expected, inner) {
@@ -1952,7 +1961,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
span: inner.span,
});
let box_ty = Ty::new_box(tcx, inner_ty);
- self.demand_eqtype_pat(span, expected, box_ty, ti);
+ self.demand_eqtype_pat(span, expected, box_ty, pat_info.top_info);
(box_ty, inner_ty)
}
Err(guar) => {
@@ -1960,7 +1969,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
(err, err)
}
};
- self.check_pat(inner, inner_ty, def_bm, ti);
+ self.check_pat(inner, inner_ty, pat_info);
box_ty
}
@@ -1971,8 +1980,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
inner: &'tcx Pat<'tcx>,
mutbl: hir::Mutability,
expected: Ty<'tcx>,
- def_bm: BindingMode,
- ti: TopInfo<'tcx>,
+ pat_info: PatInfo<'tcx, '_>,
) -> Ty<'tcx> {
let tcx = self.tcx;
let expected = self.shallow_resolve(expected);
@@ -1994,7 +2002,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
});
let ref_ty = self.new_ref_ty(pat.span, mutbl, inner_ty);
debug!("check_pat_ref: demanding {:?} = {:?}", expected, ref_ty);
- let err = self.demand_eqtype_pat_diag(pat.span, expected, ref_ty, ti);
+ let err = self.demand_eqtype_pat_diag(
+ pat.span,
+ expected,
+ ref_ty,
+ pat_info.top_info,
+ );
// Look for a case like `fn foo(&foo: u32)` and suggest
// `fn foo(foo: &u32)`
@@ -2011,7 +2024,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
(err, err)
}
};
- self.check_pat(inner, inner_ty, def_bm, ti);
+ self.check_pat(inner, inner_ty, pat_info);
ref_ty
}
@@ -2022,6 +2035,62 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Ty::new_ref(self.tcx, region, mt)
}
+ fn try_resolve_slice_ty_to_array_ty(
+ &self,
+ before: &'tcx [Pat<'tcx>],
+ slice: Option<&'tcx Pat<'tcx>>,
+ span: Span,
+ ) -> Option<Ty<'tcx>> {
+ if slice.is_some() {
+ return None;
+ }
+
+ let tcx = self.tcx;
+ let len = before.len();
+ let ty_var_origin =
+ TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span };
+ let inner_ty = self.next_ty_var(ty_var_origin);
+
+ Some(Ty::new_array(tcx, inner_ty, len.try_into().unwrap()))
+ }
+
+ /// Used to determine whether we can infer the expected type in the slice pattern to be of type array.
+ /// This is only possible if we're in an irrefutable pattern. If we were to allow this in refutable
+ /// patterns, we would, e.g., fail to report an ambiguity in the following situation:
+ ///
+ /// ```ignore(rust)
+ /// struct Zeroes;
+ /// const ARR: [usize; 2] = [0; 2];
+ /// const ARR2: [usize; 2] = [2; 2];
+ ///
+ /// impl Into<&'static [usize; 2]> for Zeroes {
+ /// fn into(self) -> &'static [usize; 2] {
+ /// &ARR
+ /// }
+ /// }
+ ///
+ /// impl Into<&'static [usize]> for Zeroes {
+ /// fn into(self) -> &'static [usize] {
+ /// &ARR2
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// let &[a, b]: &[usize] = Zeroes.into() else {
+ /// ..
+ /// };
+ /// }
+ /// ```
+ ///
+ /// If we're in an irrefutable pattern we prefer the array impl candidate given that
+ /// the slice impl candidate would be rejected anyway (if no ambiguity existed).
+ fn pat_is_irrefutable(&self, decl_origin: Option<DeclOrigin<'_>>) -> bool {
+ match decl_origin {
+ Some(DeclOrigin::LocalDecl { els: None }) => true,
+ Some(DeclOrigin::LocalDecl { els: Some(_) } | DeclOrigin::LetExpr) | None => false,
+ }
+ }
+
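A sketch in the spirit of the doc comment above, showing the irrefutable case that the array inference enables (adapted from the types used in that example; assumes the new behavior introduced by this change):

```rust
struct Zeroes;
const ARR: [usize; 2] = [0; 2];

impl Into<&'static [usize; 2]> for Zeroes {
    fn into(self) -> &'static [usize; 2] {
        &ARR
    }
}

impl Into<&'static [usize]> for Zeroes {
    fn into(self) -> &'static [usize] {
        &ARR
    }
}

fn main() {
    // Irrefutable `let` (no `else`) and no rest pattern: the expected type is an
    // inference variable, so it is equated with `&[_; 2]`, ruling out the slice impl
    // and letting the array impl be selected instead of reporting an ambiguity.
    let &[a, b] = Zeroes.into();
    let _ = (a, b);
}
```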
/// Type check a slice pattern.
///
/// Syntactically, these look like `[pat_0, ..., pat_n]`.
@@ -2039,10 +2108,24 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
slice: Option<&'tcx Pat<'tcx>>,
after: &'tcx [Pat<'tcx>],
expected: Ty<'tcx>,
- def_bm: BindingMode,
- ti: TopInfo<'tcx>,
+ pat_info: PatInfo<'tcx, '_>,
) -> Ty<'tcx> {
+ let expected = self.try_structurally_resolve_type(span, expected);
+
+ // If the pattern is irrefutable and `expected` is an infer ty, we try to equate it
+ // to an array if the given pattern allows it. See issue #76342
+ if self.pat_is_irrefutable(pat_info.decl_origin) && expected.is_ty_var() {
+ if let Some(resolved_arr_ty) =
+ self.try_resolve_slice_ty_to_array_ty(before, slice, span)
+ {
+ debug!(?resolved_arr_ty);
+ self.demand_eqtype(span, expected, resolved_arr_ty);
+ }
+ }
+
let expected = self.structurally_resolve_type(span, expected);
+ debug!(?expected);
+
let (element_ty, opt_slice_ty, inferred) = match *expected.kind() {
// An array, so we might have something like `let [a, b, c] = [0, 1, 2];`.
ty::Array(element_ty, len) => {
@@ -2057,10 +2140,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty::Slice(element_ty) => (element_ty, Some(expected), expected),
// The expected type must be an array or slice, but was neither, so error.
_ => {
- let guar = expected
- .error_reported()
- .err()
- .unwrap_or_else(|| self.error_expected_array_or_slice(span, expected, ti));
+ let guar = expected.error_reported().err().unwrap_or_else(|| {
+ self.error_expected_array_or_slice(span, expected, pat_info.top_info)
+ });
let err = Ty::new_error(self.tcx, guar);
(err, Some(err), err)
}
@@ -2068,15 +2150,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Type check all the patterns before `slice`.
for elt in before {
- self.check_pat(elt, element_ty, def_bm, ti);
+ self.check_pat(elt, element_ty, pat_info);
}
// Type check the `slice`, if present, against its expected type.
if let Some(slice) = slice {
- self.check_pat(slice, opt_slice_ty.unwrap(), def_bm, ti);
+ self.check_pat(slice, opt_slice_ty.unwrap(), pat_info);
}
// Type check the elements after `slice`, if present.
for elt in after {
- self.check_pat(elt, element_ty, def_bm, ti);
+ self.check_pat(elt, element_ty, pat_info);
}
inferred
}
diff --git a/compiler/rustc_hir_typeck/src/place_op.rs b/compiler/rustc_hir_typeck/src/place_op.rs
index fd43b475e..406434e09 100644
--- a/compiler/rustc_hir_typeck/src/place_op.rs
+++ b/compiler/rustc_hir_typeck/src/place_op.rs
@@ -284,7 +284,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut exprs = vec![expr];
while let hir::ExprKind::Field(ref expr, _)
- | hir::ExprKind::Index(ref expr, _)
+ | hir::ExprKind::Index(ref expr, _, _)
| hir::ExprKind::Unary(hir::UnOp::Deref, ref expr) = exprs.last().unwrap().kind
{
exprs.push(expr);
@@ -392,7 +392,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// We also could not use `expr_ty_adjusted` of index_expr because reborrowing
// during coercions can also cause the type of index_expr to differ from `T`,
// which can potentially cause regionck failure (#74933).
- Some(self.typeck_results.borrow().node_substs(expr.hir_id).type_at(1))
+ Some(self.typeck_results.borrow().node_args(expr.hir_id).type_at(1))
}
};
let arg_tys = arg_ty.as_slice();
diff --git a/compiler/rustc_hir_typeck/src/rvalue_scopes.rs b/compiler/rustc_hir_typeck/src/rvalue_scopes.rs
index 22c9e7961..04d841023 100644
--- a/compiler/rustc_hir_typeck/src/rvalue_scopes.rs
+++ b/compiler/rustc_hir_typeck/src/rvalue_scopes.rs
@@ -40,7 +40,7 @@ fn record_rvalue_scope_rec(
hir::ExprKind::AddrOf(_, _, subexpr)
| hir::ExprKind::Unary(hir::UnOp::Deref, subexpr)
| hir::ExprKind::Field(subexpr, _)
- | hir::ExprKind::Index(subexpr, _) => {
+ | hir::ExprKind::Index(subexpr, _, _) => {
expr = subexpr;
}
_ => {
@@ -74,9 +74,7 @@ pub fn resolve_rvalue_scopes<'a, 'tcx>(
debug!("start resolving rvalue scopes, def_id={def_id:?}");
debug!("rvalue_scope: rvalue_candidates={:?}", scope_tree.rvalue_candidates);
for (&hir_id, candidate) in &scope_tree.rvalue_candidates {
- let Some(Node::Expr(expr)) = hir_map.find(hir_id) else {
- bug!("hir node does not exist")
- };
+ let Some(Node::Expr(expr)) = hir_map.find(hir_id) else { bug!("hir node does not exist") };
record_rvalue_scope(&mut rvalue_scopes, expr, candidate);
}
rvalue_scopes
diff --git a/compiler/rustc_hir_typeck/src/upvar.rs b/compiler/rustc_hir_typeck/src/upvar.rs
index 208c40a39..1a41786d2 100644
--- a/compiler/rustc_hir_typeck/src/upvar.rs
+++ b/compiler/rustc_hir_typeck/src/upvar.rs
@@ -33,6 +33,7 @@
use super::FnCtxt;
use crate::expr_use_visitor as euv;
+use rustc_data_structures::unord::{ExtendUnord, UnordSet};
use rustc_errors::{Applicability, MultiSpan};
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
@@ -41,14 +42,14 @@ use rustc_infer::infer::UpvarRegion;
use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection, ProjectionKind};
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{
- self, ClosureSizeProfileData, Ty, TyCtxt, TypeckResults, UpvarCapture, UpvarSubsts,
+ self, ClosureSizeProfileData, Ty, TyCtxt, TypeckResults, UpvarArgs, UpvarCapture,
};
use rustc_session::lint;
use rustc_span::sym;
use rustc_span::{BytePos, Pos, Span, Symbol};
use rustc_trait_selection::infer::InferCtxtExt;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_target::abi::FIRST_VARIANT;
use std::iter;
@@ -108,11 +109,11 @@ impl MigrationWarningReason {
fn migration_message(&self) -> String {
let base = "changes to closure capture in Rust 2021 will affect";
if !self.auto_traits.is_empty() && self.drop_order {
- format!("{} drop order and which traits the closure implements", base)
+ format!("{base} drop order and which traits the closure implements")
} else if self.drop_order {
- format!("{} drop order", base)
+ format!("{base} drop order")
} else {
- format!("{} which traits the closure implements", base)
+ format!("{base} which traits the closure implements")
}
}
}
@@ -168,9 +169,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
) {
// Extract the type of the closure.
let ty = self.node_ty(closure_hir_id);
- let (closure_def_id, substs) = match *ty.kind() {
- ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
- ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
+ let (closure_def_id, args) = match *ty.kind() {
+ ty::Closure(def_id, args) => (def_id, UpvarArgs::Closure(args)),
+ ty::Generator(def_id, args, _) => (def_id, UpvarArgs::Generator(args)),
ty::Error(_) => {
// #51714: skip analysis when we have already encountered type errors
return;
@@ -186,8 +187,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let closure_def_id = closure_def_id.expect_local();
- let infer_kind = if let UpvarSubsts::Closure(closure_substs) = substs {
- self.closure_kind(closure_substs).is_none().then_some(closure_substs)
+ let infer_kind = if let UpvarArgs::Closure(closure_args) = args {
+ self.closure_kind(closure_args).is_none().then_some(closure_args)
} else {
None
};
@@ -256,19 +257,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let before_feature_tys = self.final_upvar_tys(closure_def_id);
- if let Some(closure_substs) = infer_kind {
+ if let Some(closure_args) = infer_kind {
// Unify the (as yet unbound) type variable in the closure
- // substs with the kind we inferred.
- let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ // args with the kind we inferred.
+ let closure_kind_ty = closure_args.as_closure().kind_ty();
self.demand_eqtype(span, closure_kind.to_ty(self.tcx), closure_kind_ty);
// If we have an origin, store it.
- if let Some(origin) = origin {
- let origin = if enable_precise_capture(span) {
- (origin.0, origin.1)
- } else {
- (origin.0, Place { projections: vec![], ..origin.1 })
- };
+ if let Some(mut origin) = origin {
+ if !enable_precise_capture(span) {
+ // Without precise captures, we just capture the base and ignore
+ // the projections.
+ origin.1.projections.clear()
+ }
self.typeck_results
.borrow_mut()
@@ -293,15 +294,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Equate the type variables for the upvars with the actual types.
let final_upvar_tys = self.final_upvar_tys(closure_def_id);
- debug!(
- "analyze_closure: id={:?} substs={:?} final_upvar_tys={:?}",
- closure_hir_id, substs, final_upvar_tys
- );
+ debug!(?closure_hir_id, ?args, ?final_upvar_tys);
// Build a tuple (U0..Un) of the final upvar types U0..Un
// and unify the upvar tuple type in the closure with it:
let final_tupled_upvars_type = Ty::new_tup(self.tcx, &final_upvar_tys);
- self.demand_suptype(span, substs.tupled_upvars_ty(), final_tupled_upvars_type);
+ self.demand_suptype(span, args.tupled_upvars_ty(), final_tupled_upvars_type);
let fake_reads = delegate
.fake_reads
@@ -337,10 +335,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let upvar_ty = captured_place.place.ty();
let capture = captured_place.info.capture_kind;
- debug!(
- "final_upvar_tys: place={:?} upvar_ty={:?} capture={:?}, mutability={:?}",
- captured_place.place, upvar_ty, capture, captured_place.mutability,
- );
+ debug!(?captured_place.place, ?upvar_ty, ?capture, ?captured_place.mutability);
apply_capture_kind_on_capture_ty(self.tcx, upvar_ty, capture, captured_place.region)
})
@@ -644,7 +639,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
for capture in captures {
match capture.info.capture_kind {
ty::UpvarCapture::ByRef(_) => {
- let PlaceBase::Upvar(upvar_id) = capture.place.base else { bug!("expected upvar") };
+ let PlaceBase::Upvar(upvar_id) = capture.place.base else {
+ bug!("expected upvar")
+ };
let origin = UpvarRegion(upvar_id, closure_span);
let upvar_region = self.next_region_var(origin);
capture.region = Some(upvar_region);
@@ -676,6 +673,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match (p1.kind, p2.kind) {
// Paths are the same, continue to next loop.
(ProjectionKind::Deref, ProjectionKind::Deref) => {}
+ (ProjectionKind::OpaqueCast, ProjectionKind::OpaqueCast) => {}
(ProjectionKind::Field(i1, _), ProjectionKind::Field(i2, _))
if i1 == i2 => {}
@@ -698,10 +696,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
l @ (ProjectionKind::Index
| ProjectionKind::Subslice
| ProjectionKind::Deref
+ | ProjectionKind::OpaqueCast
| ProjectionKind::Field(..)),
r @ (ProjectionKind::Index
| ProjectionKind::Subslice
| ProjectionKind::Deref
+ | ProjectionKind::OpaqueCast
| ProjectionKind::Field(..)),
) => bug!(
"ProjectionKinds Index or Subslice were unexpected: ({:?}, {:?})",
@@ -821,8 +821,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
lint.note("for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/disjoint-capture-in-closures.html>");
let diagnostic_msg = format!(
- "add a dummy let to cause {} to be fully captured",
- migrated_variables_concat
+ "add a dummy let to cause {migrated_variables_concat} to be fully captured"
);
let closure_span = self.tcx.hir().span_with_body(closure_hir_id);
@@ -908,19 +907,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// Combines all the reasons for 2229 migrations
fn compute_2229_migrations_reasons(
&self,
- auto_trait_reasons: FxHashSet<&'static str>,
+ auto_trait_reasons: UnordSet<&'static str>,
drop_order: bool,
) -> MigrationWarningReason {
- let mut reasons = MigrationWarningReason::default();
-
- reasons.auto_traits.extend(auto_trait_reasons);
- reasons.drop_order = drop_order;
-
- // `auto_trait_reasons` are in hashset order, so sort them to put the
- // diagnostics we emit later in a cross-platform-consistent order.
- reasons.auto_traits.sort_unstable();
-
- reasons
+ MigrationWarningReason {
+ auto_traits: auto_trait_reasons.to_sorted_stable_ord(),
+ drop_order,
+ }
}
/// Figures out the list of root variables (and their types) that aren't completely
@@ -934,8 +927,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
var_hir_id: hir::HirId,
closure_clause: hir::CaptureBy,
- ) -> Option<FxHashMap<UpvarMigrationInfo, FxHashSet<&'static str>>> {
- let auto_traits_def_id = vec![
+ ) -> Option<FxIndexMap<UpvarMigrationInfo, UnordSet<&'static str>>> {
+ let auto_traits_def_id = [
self.tcx.lang_items().clone_trait(),
self.tcx.lang_items().sync_trait(),
self.tcx.get_diagnostic_item(sym::Send),
@@ -979,7 +972,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}));
}
- let mut problematic_captures = FxHashMap::default();
+ let mut problematic_captures = FxIndexMap::default();
// Check whether captured fields also implement the trait
for capture in root_var_min_capture_list.iter() {
let ty = apply_capture_kind_on_capture_ty(
@@ -999,7 +992,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}));
}
- let mut capture_problems = FxHashSet::default();
+ let mut capture_problems = UnordSet::default();
        // Checks whether, for any of the auto traits, one or more traits are implemented
// by the root variable but not by the capture
@@ -1045,7 +1038,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
closure_clause: hir::CaptureBy,
var_hir_id: hir::HirId,
- ) -> Option<FxHashSet<UpvarMigrationInfo>> {
+ ) -> Option<FxIndexSet<UpvarMigrationInfo>> {
let ty = self.resolve_vars_if_possible(self.node_ty(var_hir_id));
if !ty.has_significant_drop(self.tcx, self.tcx.param_env(closure_def_id)) {
@@ -1064,14 +1057,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// ```
debug!("no path starting from it is used");
-
match closure_clause {
// Only migrate if closure is a move closure
hir::CaptureBy::Value => {
- let mut diagnostics_info = FxHashSet::default();
- let upvars = self.tcx.upvars_mentioned(closure_def_id).expect("must be an upvar");
+ let mut diagnostics_info = FxIndexSet::default();
+ let upvars =
+ self.tcx.upvars_mentioned(closure_def_id).expect("must be an upvar");
let upvar = upvars[&var_hir_id];
- diagnostics_info.insert(UpvarMigrationInfo::CapturingNothing { use_span: upvar.span });
+ diagnostics_info
+ .insert(UpvarMigrationInfo::CapturingNothing { use_span: upvar.span });
return Some(diagnostics_info);
}
hir::CaptureBy::Ref => {}
@@ -1082,7 +1076,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
debug!(?root_var_min_capture_list);
let mut projections_list = Vec::new();
- let mut diagnostics_info = FxHashSet::default();
+ let mut diagnostics_info = FxIndexSet::default();
for captured_place in root_var_min_capture_list.iter() {
match captured_place.info.capture_kind {
@@ -1152,7 +1146,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let mut need_migrations = Vec::new();
- let mut auto_trait_migration_reasons = FxHashSet::default();
+ let mut auto_trait_migration_reasons = UnordSet::default();
let mut drop_migration_needed = false;
// Perform auto-trait analysis
@@ -1164,7 +1158,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
{
diagnostics_info
} else {
- FxHashMap::default()
+ FxIndexMap::default()
};
let drop_reorder_diagnostic = if let Some(diagnostics_info) = self
@@ -1178,7 +1172,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
drop_migration_needed = true;
diagnostics_info
} else {
- FxHashSet::default()
+ FxIndexSet::default()
};
// Combine all the captures responsible for needing migrations into one HashSet
@@ -1195,7 +1189,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if let Some(reasons) = auto_trait_diagnostic.get(&captures_info) {
reasons.clone()
} else {
- FxHashSet::default()
+ UnordSet::default()
};
// Check if migration is needed because of drop reorder as a result of that capture
@@ -1203,7 +1197,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
        // Combine all the reasons why the root variable should be captured as a result of
// auto trait implementation issues
- auto_trait_migration_reasons.extend(capture_trait_reasons.iter().copied());
+ auto_trait_migration_reasons.extend_unord(capture_trait_reasons.items().copied());
diagnostics_info.push(MigrationLintNote {
captures_info,
@@ -1385,7 +1379,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty::Ref(..) => unreachable!(),
ty::RawPtr(..) => unreachable!(),
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
// Multi-variant enums are captured in entirety,
// which would've been handled in the case of single empty slice in `captured_by_move_projs`.
assert_eq!(def.variants().len(), 1);
@@ -1412,7 +1406,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
})
.collect();
- let after_field_ty = field.ty(self.tcx, substs);
+ let after_field_ty = field.ty(self.tcx, args);
self.has_significant_drop_outside_of_captures(
closure_def_id,
closure_span,
@@ -1893,6 +1887,7 @@ fn restrict_capture_precision(
return (place, curr_mode);
}
ProjectionKind::Deref => {}
+ ProjectionKind::OpaqueCast => {}
ProjectionKind::Field(..) => {} // ignore
}
}
@@ -1945,10 +1940,11 @@ fn construct_place_string<'tcx>(tcx: TyCtxt<'_>, place: &Place<'tcx>) -> String
let mut projections_str = String::new();
for (i, item) in place.projections.iter().enumerate() {
let proj = match item.kind {
- ProjectionKind::Field(a, b) => format!("({:?}, {:?})", a, b),
+ ProjectionKind::Field(a, b) => format!("({a:?}, {b:?})"),
ProjectionKind::Deref => String::from("Deref"),
ProjectionKind::Index => String::from("Index"),
ProjectionKind::Subslice => String::from("Subslice"),
+ ProjectionKind::OpaqueCast => String::from("OpaqueCast"),
};
if i != 0 {
projections_str.push(',');
@@ -1968,7 +1964,7 @@ fn construct_capture_kind_reason_string<'tcx>(
let capture_kind_str = match capture_info.capture_kind {
ty::UpvarCapture::ByValue => "ByValue".into(),
- ty::UpvarCapture::ByRef(kind) => format!("{:?}", kind),
+ ty::UpvarCapture::ByRef(kind) => format!("{kind:?}"),
};
format!("{place_str} captured as {capture_kind_str} here")
@@ -1989,7 +1985,7 @@ fn construct_capture_info_string<'tcx>(
let capture_kind_str = match capture_info.capture_kind {
ty::UpvarCapture::ByValue => "ByValue".into(),
- ty::UpvarCapture::ByRef(kind) => format!("{:?}", kind),
+ ty::UpvarCapture::ByRef(kind) => format!("{kind:?}"),
};
format!("{place_str} -> {capture_kind_str}")
}
@@ -2003,7 +1999,7 @@ fn should_do_rust_2021_incompatible_closure_captures_analysis(
tcx: TyCtxt<'_>,
closure_id: hir::HirId,
) -> bool {
- if tcx.sess.rust_2021() {
+ if tcx.sess.at_least_rust_2021() {
return false;
}
@@ -2249,5 +2245,5 @@ fn truncate_capture_for_optimization(
fn enable_precise_capture(span: Span) -> bool {
    // We use the span here so that, if the closure was generated by a macro with a
    // different edition than the surrounding code, we respect the macro's edition.
- span.rust_2021()
+ span.at_least_rust_2021()
}
diff --git a/compiler/rustc_hir_typeck/src/writeback.rs b/compiler/rustc_hir_typeck/src/writeback.rs
index 106457536..603681bbc 100644
--- a/compiler/rustc_hir_typeck/src/writeback.rs
+++ b/compiler/rustc_hir_typeck/src/writeback.rs
@@ -3,23 +3,19 @@
// substitutions.
use crate::FnCtxt;
-use hir::def_id::LocalDefId;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::unord::ExtendUnord;
use rustc_errors::{ErrorGuaranteed, StashKey};
use rustc_hir as hir;
use rustc_hir::intravisit::{self, Visitor};
use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
-use rustc_middle::hir::place::Place as HirPlace;
-use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCoercion};
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
-use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitableExt};
-use rustc_middle::ty::{self, ClosureSizeProfileData, Ty, TyCtxt};
+use rustc_middle::ty::visit::TypeVisitableExt;
+use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::symbol::sym;
use rustc_span::Span;
use std::mem;
-use std::ops::ControlFlow;
///////////////////////////////////////////////////////////////////////////
// Entry point
@@ -42,9 +38,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// This attribute causes us to dump some writeback information
// in the form of errors, which is used for unit tests.
- let rustc_dump_user_substs = self.tcx.has_attr(item_def_id, sym::rustc_dump_user_substs);
+ let rustc_dump_user_args = self.tcx.has_attr(item_def_id, sym::rustc_dump_user_args);
- let mut wbcx = WritebackCx::new(self, body, rustc_dump_user_substs);
+ let mut wbcx = WritebackCx::new(self, body, rustc_dump_user_args);
for param in body.params {
wbcx.visit_node_id(param.pat.span, param.hir_id);
}
@@ -102,14 +98,14 @@ struct WritebackCx<'cx, 'tcx> {
body: &'tcx hir::Body<'tcx>,
- rustc_dump_user_substs: bool,
+ rustc_dump_user_args: bool,
}
impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
fn new(
fcx: &'cx FnCtxt<'cx, 'tcx>,
body: &'tcx hir::Body<'tcx>,
- rustc_dump_user_substs: bool,
+ rustc_dump_user_args: bool,
) -> WritebackCx<'cx, 'tcx> {
let owner = body.id().hir_id.owner;
@@ -117,7 +113,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
fcx,
typeck_results: ty::TypeckResults::new(owner),
body,
- rustc_dump_user_substs,
+ rustc_dump_user_args,
};
// HACK: We specifically don't want the (opaque) error from tainting our
@@ -154,7 +150,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
if inner_ty.is_scalar() {
self.typeck_results.type_dependent_defs_mut().remove(e.hir_id);
- self.typeck_results.node_substs_mut().remove(e.hir_id);
+ self.typeck_results.node_args_mut().remove(e.hir_id);
}
}
hir::ExprKind::Binary(ref op, lhs, rhs) | hir::ExprKind::AssignOp(ref op, lhs, rhs) => {
@@ -163,7 +159,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
if lhs_ty.is_scalar() && rhs_ty.is_scalar() {
self.typeck_results.type_dependent_defs_mut().remove(e.hir_id);
- self.typeck_results.node_substs_mut().remove(e.hir_id);
+ self.typeck_results.node_args_mut().remove(e.hir_id);
match e.kind {
hir::ExprKind::Binary(..) => {
@@ -214,14 +210,14 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
// to use builtin indexing because the index type is known to be
// usize-ish
fn fix_index_builtin_expr(&mut self, e: &hir::Expr<'_>) {
- if let hir::ExprKind::Index(ref base, ref index) = e.kind {
+ if let hir::ExprKind::Index(ref base, ref index, _) = e.kind {
// All valid indexing looks like this; might encounter non-valid indexes at this point.
let base_ty = self.typeck_results.expr_ty_adjusted_opt(base);
if base_ty.is_none() {
// When encountering `return [0][0]` outside of a `fn` body we can encounter a base
// that isn't in the type table. We assume more relevant errors have already been
// emitted, so we delay an ICE if none have. (#64638)
- self.tcx().sess.delay_span_bug(e.span, format!("bad base: `{:?}`", base));
+ self.tcx().sess.delay_span_bug(e.span, format!("bad base: `{base:?}`"));
}
if let Some(base_ty) = base_ty
&& let ty::Ref(_, base_ty_inner, _) = *base_ty.kind()
@@ -235,13 +231,13 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
Ty::new_error_with_message(
self.fcx.tcx,
e.span,
- format!("bad index {:?} for base: `{:?}`", index, base),
+ format!("bad index {index:?} for base: `{base:?}`"),
)
});
if self.is_builtin_index(e, base_ty_inner, index_ty) {
// Remove the method call record
self.typeck_results.type_dependent_defs_mut().remove(e.hir_id);
- self.typeck_results.node_substs_mut().remove(e.hir_id);
+ self.typeck_results.node_args_mut().remove(e.hir_id);
if let Some(a) = self.typeck_results.adjustments_mut().get_mut(base.hir_id) {
// Discard the need for a mutable borrow
@@ -376,66 +372,75 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> {
impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
fn eval_closure_size(&mut self) {
- let mut res: FxHashMap<LocalDefId, ClosureSizeProfileData<'tcx>> = Default::default();
- for (&closure_def_id, data) in self.fcx.typeck_results.borrow().closure_size_eval.iter() {
- let closure_hir_id = self.tcx().hir().local_def_id_to_hir_id(closure_def_id);
-
- let data = self.resolve(*data, &closure_hir_id);
-
- res.insert(closure_def_id, data);
- }
-
- self.typeck_results.closure_size_eval = res;
+ self.tcx().with_stable_hashing_context(|ref hcx| {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+
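+            // Iterate the unordered map in a stable order (via the stable hashing context)
+            // so the resolved sizes are written back deterministically.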
+ self.typeck_results.closure_size_eval = fcx_typeck_results
+ .closure_size_eval
+ .to_sorted(hcx, false)
+ .into_iter()
+ .map(|(&closure_def_id, data)| {
+ let closure_hir_id = self.tcx().hir().local_def_id_to_hir_id(closure_def_id);
+ let data = self.resolve(*data, &closure_hir_id);
+ (closure_def_id, data)
+ })
+ .collect();
+ })
}
- fn visit_min_capture_map(&mut self) {
- let mut min_captures_wb = ty::MinCaptureInformationMap::with_capacity_and_hasher(
- self.fcx.typeck_results.borrow().closure_min_captures.len(),
- Default::default(),
- );
- for (&closure_def_id, root_min_captures) in
- self.fcx.typeck_results.borrow().closure_min_captures.iter()
- {
- let mut root_var_map_wb = ty::RootVariableMinCaptureList::with_capacity_and_hasher(
- root_min_captures.len(),
- Default::default(),
- );
- for (var_hir_id, min_list) in root_min_captures.iter() {
- let min_list_wb = min_list
- .iter()
- .map(|captured_place| {
- let locatable = captured_place.info.path_expr_id.unwrap_or_else(|| {
- self.tcx().hir().local_def_id_to_hir_id(closure_def_id)
- });
-
- self.resolve(captured_place.clone(), &locatable)
- })
- .collect();
- root_var_map_wb.insert(*var_hir_id, min_list_wb);
- }
- min_captures_wb.insert(closure_def_id, root_var_map_wb);
- }
- self.typeck_results.closure_min_captures = min_captures_wb;
+ fn visit_min_capture_map(&mut self) {
+ self.tcx().with_stable_hashing_context(|ref hcx| {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+
+ self.typeck_results.closure_min_captures = fcx_typeck_results
+ .closure_min_captures
+ .to_sorted(hcx, false)
+ .into_iter()
+ .map(|(&closure_def_id, root_min_captures)| {
+ let root_var_map_wb = root_min_captures
+ .iter()
+ .map(|(var_hir_id, min_list)| {
+ let min_list_wb = min_list
+ .iter()
+ .map(|captured_place| {
+ let locatable =
+ captured_place.info.path_expr_id.unwrap_or_else(|| {
+ self.tcx().hir().local_def_id_to_hir_id(closure_def_id)
+ });
+ self.resolve(captured_place.clone(), &locatable)
+ })
+ .collect();
+ (*var_hir_id, min_list_wb)
+ })
+ .collect();
+ (closure_def_id, root_var_map_wb)
+ })
+ .collect();
+ })
}
fn visit_fake_reads_map(&mut self) {
- let mut resolved_closure_fake_reads: FxHashMap<
- LocalDefId,
- Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>,
- > = Default::default();
- for (&closure_def_id, fake_reads) in
- self.fcx.typeck_results.borrow().closure_fake_reads.iter()
- {
- let mut resolved_fake_reads = Vec::<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>::new();
- for (place, cause, hir_id) in fake_reads.iter() {
- let locatable = self.tcx().hir().local_def_id_to_hir_id(closure_def_id);
-
- let resolved_fake_read = self.resolve(place.clone(), &locatable);
- resolved_fake_reads.push((resolved_fake_read, *cause, *hir_id));
- }
- resolved_closure_fake_reads.insert(closure_def_id, resolved_fake_reads);
- }
- self.typeck_results.closure_fake_reads = resolved_closure_fake_reads;
+ self.tcx().with_stable_hashing_context(move |ref hcx| {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+
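+            // Again, sort the unordered map through the stable hashing context so the
+            // fake reads are resolved and written back in a deterministic order.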
+ self.typeck_results.closure_fake_reads = fcx_typeck_results
+ .closure_fake_reads
+ .to_sorted(hcx, true)
+ .into_iter()
+ .map(|(&closure_def_id, fake_reads)| {
+ let resolved_fake_reads = fake_reads
+ .iter()
+ .map(|(place, cause, hir_id)| {
+ let locatable = self.tcx().hir().local_def_id_to_hir_id(closure_def_id);
+ let resolved_fake_read = self.resolve(place.clone(), &locatable);
+ (resolved_fake_read, *cause, *hir_id)
+ })
+ .collect();
+
+ (closure_def_id, resolved_fake_reads)
+ })
+ .collect();
+ });
}
fn visit_closures(&mut self) {
@@ -470,7 +475,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
let common_hir_owner = fcx_typeck_results.hir_owner;
- if self.rustc_dump_user_substs {
+ if self.rustc_dump_user_args {
let sorted_user_provided_types =
fcx_typeck_results.user_provided_types().items_in_stable_order();
@@ -478,15 +483,13 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
for (local_id, c_ty) in sorted_user_provided_types {
let hir_id = hir::HirId { owner: common_hir_owner, local_id };
- if let ty::UserType::TypeOf(_, user_substs) = c_ty.value {
+ if let ty::UserType::TypeOf(_, user_args) = c_ty.value {
// This is a unit-testing mechanism.
let span = self.tcx().hir().span(hir_id);
// We need to buffer the errors in order to guarantee a consistent
// order when emitting them.
- let err = self
- .tcx()
- .sess
- .struct_span_err(span, format!("user substs: {:?}", user_substs));
+ let err =
+ self.tcx().sess.struct_span_err(span, format!("user args: {user_args:?}"));
err.buffer(&mut errors_buffer);
}
}
@@ -520,7 +523,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
let fcx_typeck_results = self.fcx.typeck_results.borrow();
assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
- self.typeck_results.user_provided_sigs.extend(
+ self.typeck_results.user_provided_sigs.extend_unord(
fcx_typeck_results.user_provided_sigs.items().map(|(&def_id, c_sig)| {
if cfg!(debug_assertions) && c_sig.has_infer() {
span_bug!(
@@ -540,10 +543,15 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
self.typeck_results.generator_interior_types =
fcx_typeck_results.generator_interior_types.clone();
- for (&expr_def_id, predicates) in fcx_typeck_results.generator_interior_predicates.iter() {
- let predicates = self.resolve(predicates.clone(), &self.fcx.tcx.def_span(expr_def_id));
- self.typeck_results.generator_interior_predicates.insert(expr_def_id, predicates);
- }
+ self.tcx().with_stable_hashing_context(move |ref hcx| {
+ for (&expr_def_id, predicates) in
+ fcx_typeck_results.generator_interior_predicates.to_sorted(hcx, false).into_iter()
+ {
+ let predicates =
+ self.resolve(predicates.clone(), &self.fcx.tcx.def_span(expr_def_id));
+ self.typeck_results.generator_interior_predicates.insert(expr_def_id, predicates);
+ }
+ })
}
#[instrument(skip(self), level = "debug")]
@@ -553,23 +561,9 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
let hidden_type = self.resolve(decl.hidden_type, &decl.hidden_type.span);
let opaque_type_key = self.resolve(opaque_type_key, &decl.hidden_type.span);
- struct RecursionChecker {
- def_id: LocalDefId,
- }
- impl<'tcx> ty::TypeVisitor<TyCtxt<'tcx>> for RecursionChecker {
- type BreakTy = ();
- fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- if let ty::Alias(ty::Opaque, ty::AliasTy { def_id, .. }) = *t.kind() {
- if def_id == self.def_id.to_def_id() {
- return ControlFlow::Break(());
- }
- }
- t.super_visit_with(self)
- }
- }
- if hidden_type
- .visit_with(&mut RecursionChecker { def_id: opaque_type_key.def_id })
- .is_break()
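+        // Skip the trivial case where the hidden type is the opaque type itself
+        // (same def-id and identical args).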
+ if let ty::Alias(ty::Opaque, alias_ty) = hidden_type.ty.kind()
+ && alias_ty.def_id == opaque_type_key.def_id.to_def_id()
+ && alias_ty.args == opaque_type_key.args
{
continue;
}
@@ -619,11 +613,11 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
debug!(?n_ty);
// Resolve any substitutions
- if let Some(substs) = self.fcx.typeck_results.borrow().node_substs_opt(hir_id) {
- let substs = self.resolve(substs, &span);
- debug!("write_substs_to_tcx({:?}, {:?})", hir_id, substs);
- assert!(!substs.has_infer() && !substs.has_placeholders());
- self.typeck_results.node_substs_mut().insert(hir_id, substs);
+ if let Some(args) = self.fcx.typeck_results.borrow().node_args_opt(hir_id) {
+ let args = self.resolve(args, &span);
+ debug!("write_args_to_tcx({:?}, {:?})", hir_id, args);
+ assert!(!args.has_infer() && !args.has_placeholders());
+ self.typeck_results.node_args_mut().insert(hir_id, args);
}
}
diff --git a/compiler/rustc_incremental/src/assert_dep_graph.rs b/compiler/rustc_incremental/src/assert_dep_graph.rs
index 52a84b204..5e7ae3ecd 100644
--- a/compiler/rustc_incremental/src/assert_dep_graph.rs
+++ b/compiler/rustc_incremental/src/assert_dep_graph.rs
@@ -241,16 +241,16 @@ fn dump_graph(query: &DepGraphQuery) {
{
// dump a .txt file with just the edges:
- let txt_path = format!("{}.txt", path);
+ let txt_path = format!("{path}.txt");
let mut file = BufWriter::new(File::create(&txt_path).unwrap());
for (source, target) in &edges {
- write!(file, "{:?} -> {:?}\n", source, target).unwrap();
+ write!(file, "{source:?} -> {target:?}\n").unwrap();
}
}
{
// dump a .dot file in graphviz format:
- let dot_path = format!("{}.dot", path);
+ let dot_path = format!("{path}.dot");
let mut v = Vec::new();
dot::render(&GraphvizDepGraph(nodes, edges), &mut v).unwrap();
fs::write(dot_path, v).unwrap();
@@ -285,7 +285,7 @@ impl<'a> dot::Labeller<'a> for GraphvizDepGraph {
dot::Id::new("DependencyGraph").unwrap()
}
fn node_id(&self, n: &DepKind) -> dot::Id<'_> {
- let s: String = format!("{:?}", n)
+ let s: String = format!("{n:?}")
.chars()
.map(|c| if c == '_' || c.is_alphanumeric() { c } else { '_' })
.collect();
@@ -293,7 +293,7 @@ impl<'a> dot::Labeller<'a> for GraphvizDepGraph {
dot::Id::new(s).unwrap()
}
fn node_label(&self, n: &DepKind) -> dot::LabelText<'_> {
- dot::LabelText::label(format!("{:?}", n))
+ dot::LabelText::label(format!("{n:?}"))
}
}
diff --git a/compiler/rustc_incremental/src/assert_module_sources.rs b/compiler/rustc_incremental/src/assert_module_sources.rs
index 0111a6d30..8e22ab408 100644
--- a/compiler/rustc_incremental/src/assert_module_sources.rs
+++ b/compiler/rustc_incremental/src/assert_module_sources.rs
@@ -6,6 +6,7 @@
//!
//! ```
//! # #![feature(rustc_attrs)]
+//! # #![allow(internal_features)]
//! #![rustc_partition_reused(module="spike", cfg="rpass2")]
//! #![rustc_partition_codegened(module="spike-x", cfg="rpass2")]
//! ```
diff --git a/compiler/rustc_incremental/src/persist/dirty_clean.rs b/compiler/rustc_incremental/src/persist/dirty_clean.rs
index f9cd01fd8..5dd06c6ca 100644
--- a/compiler/rustc_incremental/src/persist/dirty_clean.rs
+++ b/compiler/rustc_incremental/src/persist/dirty_clean.rs
@@ -300,7 +300,7 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
},
_ => self.tcx.sess.emit_fatal(errors::UndefinedCleanDirty {
span: attr.span,
- kind: format!("{:?}", node),
+ kind: format!("{node:?}"),
}),
};
let labels =
diff --git a/compiler/rustc_incremental/src/persist/fs.rs b/compiler/rustc_incremental/src/persist/fs.rs
index 7708deece..db8ea2bfe 100644
--- a/compiler/rustc_incremental/src/persist/fs.rs
+++ b/compiler/rustc_incremental/src/persist/fs.rs
@@ -427,13 +427,11 @@ fn copy_files(sess: &Session, target_dir: &Path, source_dir: &Path) -> Result<bo
if sess.opts.unstable_opts.incremental_info {
eprintln!(
"[incremental] session directory: \
- {} files hard-linked",
- files_linked
+ {files_linked} files hard-linked"
);
eprintln!(
"[incremental] session directory: \
- {} files copied",
- files_copied
+ {files_copied} files copied"
);
}
@@ -540,9 +538,13 @@ where
continue;
}
- let timestamp = extract_timestamp_from_session_dir(&directory_name).unwrap_or_else(|_| {
- bug!("unexpected incr-comp session dir: {}", session_dir.display())
- });
+ let timestamp = match extract_timestamp_from_session_dir(&directory_name) {
+ Ok(timestamp) => timestamp,
+ Err(e) => {
+ debug!("unexpected incr-comp session dir: {}: {}", session_dir.display(), e);
+ continue;
+ }
+ };
if timestamp > best_candidate.0 {
best_candidate = (timestamp, Some(session_dir.clone()));
@@ -564,14 +566,14 @@ fn is_session_directory_lock_file(file_name: &str) -> bool {
file_name.starts_with("s-") && file_name.ends_with(LOCK_FILE_EXT)
}
-fn extract_timestamp_from_session_dir(directory_name: &str) -> Result<SystemTime, ()> {
+fn extract_timestamp_from_session_dir(directory_name: &str) -> Result<SystemTime, &'static str> {
if !is_session_directory(directory_name) {
- return Err(());
+ return Err("not a directory");
}
let dash_indices: Vec<_> = directory_name.match_indices('-').map(|(idx, _)| idx).collect();
if dash_indices.len() != 3 {
- return Err(());
+ return Err("not three dashes in name");
}
string_to_timestamp(&directory_name[dash_indices[0] + 1..dash_indices[1]])
@@ -583,11 +585,11 @@ fn timestamp_to_string(timestamp: SystemTime) -> String {
base_n::encode(micros as u128, INT_ENCODE_BASE)
}
-fn string_to_timestamp(s: &str) -> Result<SystemTime, ()> {
+fn string_to_timestamp(s: &str) -> Result<SystemTime, &'static str> {
let micros_since_unix_epoch = u64::from_str_radix(s, INT_ENCODE_BASE as u32);
if micros_since_unix_epoch.is_err() {
- return Err(());
+ return Err("timestamp not an int");
}
let micros_since_unix_epoch = micros_since_unix_epoch.unwrap();
@@ -604,7 +606,7 @@ fn crate_path(sess: &Session, crate_name: Symbol, stable_crate_id: StableCrateId
let stable_crate_id = base_n::encode(stable_crate_id.as_u64() as u128, INT_ENCODE_BASE);
- let crate_name = format!("{}-{}", crate_name, stable_crate_id);
+ let crate_name = format!("{crate_name}-{stable_crate_id}");
incr_dir.join(crate_name)
}
@@ -730,13 +732,13 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
- debug!(
- "found session-dir with malformed timestamp: {}",
- crate_directory.join(directory_name).display()
- );
- // Ignore it
- return None;
- };
+ debug!(
+ "found session-dir with malformed timestamp: {}",
+ crate_directory.join(directory_name).display()
+ );
+ // Ignore it
+ return None;
+ };
if is_finalized(directory_name) {
let lock_file_path = crate_directory.join(lock_file_name);
diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs
index bb479b5bd..8d67f6925 100644
--- a/compiler/rustc_incremental/src/persist/load.rs
+++ b/compiler/rustc_incremental/src/persist/load.rs
@@ -3,7 +3,7 @@
use crate::errors;
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::unord::UnordMap;
-use rustc_middle::dep_graph::{SerializedDepGraph, WorkProduct, WorkProductId};
+use rustc_middle::dep_graph::{SerializedDepGraph, WorkProductMap};
use rustc_middle::query::on_disk_cache::OnDiskCache;
use rustc_serialize::opaque::MemDecoder;
use rustc_serialize::Decodable;
@@ -16,8 +16,6 @@ use super::file_format;
use super::fs::*;
use super::work_product;
-type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
-
#[derive(Debug)]
/// Represents the result of an attempt to load incremental compilation data.
pub enum LoadResult<T> {
diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs
index bfaa52f9c..0cfaf5837 100644
--- a/compiler/rustc_incremental/src/persist/save.rs
+++ b/compiler/rustc_incremental/src/persist/save.rs
@@ -1,7 +1,9 @@
use crate::errors;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::sync::join;
-use rustc_middle::dep_graph::{DepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
+use rustc_middle::dep_graph::{
+ DepGraph, SerializedDepGraph, WorkProduct, WorkProductId, WorkProductMap,
+};
use rustc_middle::ty::TyCtxt;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_serialize::Encodable as RustcEncodable;
@@ -101,7 +103,7 @@ pub fn save_work_product_index(
// deleted during invalidation. Some object files don't change their
// content; they are just not needed anymore.
let previous_work_products = dep_graph.previous_work_products();
- for (id, wp) in previous_work_products.iter() {
+ for (id, wp) in previous_work_products.to_sorted_stable_ord().iter() {
if !new_work_products.contains_key(id) {
work_product::delete_workproduct_files(sess, wp);
debug_assert!(
@@ -146,7 +148,7 @@ fn encode_query_cache(tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult
pub fn build_dep_graph(
sess: &Session,
prev_graph: SerializedDepGraph,
- prev_work_products: FxIndexMap<WorkProductId, WorkProduct>,
+ prev_work_products: WorkProductMap,
) -> Option<DepGraph> {
if sess.opts.incremental.is_none() {
// No incremental compilation.
diff --git a/compiler/rustc_index/src/bit_set.rs b/compiler/rustc_index/src/bit_set.rs
index 15bc3b4e3..12a7ecf81 100644
--- a/compiler/rustc_index/src/bit_set.rs
+++ b/compiler/rustc_index/src/bit_set.rs
@@ -804,9 +804,7 @@ impl<'a, T: Idx> Iterator for ChunkedBitIter<'a, T> {
// advance the iterator to the start of the next chunk, before proceeding in chunk sized
// steps.
while self.index % CHUNK_BITS != 0 {
- let Some(item) = self.next() else {
- return init
- };
+ let Some(item) = self.next() else { return init };
init = f(init, item);
}
let start_chunk = self.index / CHUNK_BITS;
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs
index 6fd9f34b2..9942c70c4 100644
--- a/compiler/rustc_index/src/lib.rs
+++ b/compiler/rustc_index/src/lib.rs
@@ -12,6 +12,7 @@
test
)
)]
+#![cfg_attr(all(not(bootstrap), feature = "nightly"), allow(internal_features))]
#[cfg(feature = "nightly")]
pub mod bit_set;
diff --git a/compiler/rustc_infer/src/errors/mod.rs b/compiler/rustc_infer/src/errors/mod.rs
index 7e1fa08f2..a7e045e1e 100644
--- a/compiler/rustc_infer/src/errors/mod.rs
+++ b/compiler/rustc_infer/src/errors/mod.rs
@@ -210,10 +210,8 @@ impl<'a> SourceKindMultiSuggestion<'a> {
_ => ("", ""),
};
let (start_span, start_span_code, end_span) = match should_wrap_expr {
- Some(end_span) => {
- (data.span(), format!("{}{}{}{{ ", arrow, ty_info, post), Some(end_span))
- }
- None => (data.span(), format!("{}{}{}", arrow, ty_info, post), None),
+ Some(end_span) => (data.span(), format!("{arrow}{ty_info}{post}{{ "), Some(end_span)),
+ None => (data.span(), format!("{arrow}{ty_info}{post}"), None),
};
Self::ClosureReturn { start_span, start_span_code, end_span }
}
@@ -363,7 +361,8 @@ impl AddToDiagnostic for AddLifetimeParamsSuggestion<'_> {
let (
hir::Ty { kind: hir::TyKind::Ref(lifetime_sub, _), .. },
hir::Ty { kind: hir::TyKind::Ref(lifetime_sup, _), .. },
- ) = (self.ty_sub, self.ty_sup) else {
+ ) = (self.ty_sub, self.ty_sup)
+ else {
return false;
};
@@ -403,9 +402,9 @@ impl AddToDiagnostic for AddLifetimeParamsSuggestion<'_> {
debug!(?lifetime_sub.ident.span);
let make_suggestion = |ident: Ident| {
let sugg = if ident.name == kw::Empty {
- format!("{}, ", suggestion_param_name)
+ format!("{suggestion_param_name}, ")
} else if ident.name == kw::UnderscoreLifetime && ident.span.is_empty() {
- format!("{} ", suggestion_param_name)
+ format!("{suggestion_param_name} ")
} else {
suggestion_param_name.clone()
};
@@ -418,9 +417,9 @@ impl AddToDiagnostic for AddLifetimeParamsSuggestion<'_> {
let new_param_suggestion = if let Some(first) =
generics.params.iter().find(|p| !p.name.ident().span.is_empty())
{
- (first.span.shrink_to_lo(), format!("{}, ", suggestion_param_name))
+ (first.span.shrink_to_lo(), format!("{suggestion_param_name}, "))
} else {
- (generics.span, format!("<{}>", suggestion_param_name))
+ (generics.span, format!("<{suggestion_param_name}>"))
};
suggestions.push(new_param_suggestion);
@@ -1319,7 +1318,7 @@ impl AddToDiagnostic for SuggestTuplePatternMany {
message,
self.compatible_variants.into_iter().map(|variant| {
vec![
- (self.cause_span.shrink_to_lo(), format!("{}(", variant)),
+ (self.cause_span.shrink_to_lo(), format!("{variant}(")),
(self.cause_span.shrink_to_hi(), ")".to_string()),
]
}),
diff --git a/compiler/rustc_infer/src/errors/note_and_explain.rs b/compiler/rustc_infer/src/errors/note_and_explain.rs
index 7328241df..bd168f047 100644
--- a/compiler/rustc_infer/src/errors/note_and_explain.rs
+++ b/compiler/rustc_infer/src/errors/note_and_explain.rs
@@ -80,7 +80,7 @@ impl<'a> DescriptionCtx<'a> {
// We shouldn't really be having unification failures with ReVar
// and ReLateBound though.
ty::ReVar(_) | ty::ReLateBound(..) | ty::ReErased => {
- (alt_span, "revar", format!("{:?}", region))
+ (alt_span, "revar", format!("{region:?}"))
}
};
Some(DescriptionCtx { span, kind, arg })
diff --git a/compiler/rustc_infer/src/infer/at.rs b/compiler/rustc_infer/src/infer/at.rs
index 433735e82..6d5db3336 100644
--- a/compiler/rustc_infer/src/infer/at.rs
+++ b/compiler/rustc_infer/src/infer/at.rs
@@ -481,3 +481,31 @@ impl<'tcx> ToTrace<'tcx> for ty::FnSig<'tcx> {
TypeTrace { cause: cause.clone(), values: Sigs(ExpectedFound::new(a_is_expected, a, b)) }
}
}
+
+impl<'tcx> ToTrace<'tcx> for ty::PolyExistentialTraitRef<'tcx> {
+ fn to_trace(
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: ExistentialTraitRef(ExpectedFound::new(a_is_expected, a, b)),
+ }
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for ty::PolyExistentialProjection<'tcx> {
+ fn to_trace(
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: ExistentialProjection(ExpectedFound::new(a_is_expected, a, b)),
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
index e57532e2d..9d7a9fefd 100644
--- a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
+++ b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
@@ -11,7 +11,7 @@ use crate::infer::canonical::{
use crate::infer::InferCtxt;
use rustc_middle::ty::flags::FlagComputation;
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
-use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::GenericArg;
use rustc_middle::ty::{self, BoundVar, InferConst, List, Ty, TyCtxt, TypeFlags, TypeVisitableExt};
use std::sync::atomic::Ordering;
@@ -205,7 +205,7 @@ impl CanonicalizeMode for CanonicalizeQueryResponse {
// `delay_span_bug` to allow type error over an ICE.
canonicalizer.tcx.sess.delay_span_bug(
rustc_span::DUMMY_SP,
- format!("unexpected region in query response: `{:?}`", r),
+ format!("unexpected region in query response: `{r:?}`"),
);
r
}
@@ -562,15 +562,9 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> {
V: TypeFoldable<TyCtxt<'tcx>>,
{
let needs_canonical_flags = if canonicalize_region_mode.any() {
- TypeFlags::HAS_INFER |
- TypeFlags::HAS_FREE_REGIONS | // `HAS_RE_PLACEHOLDER` implies `HAS_FREE_REGIONS`
- TypeFlags::HAS_TY_PLACEHOLDER |
- TypeFlags::HAS_CT_PLACEHOLDER
+ TypeFlags::HAS_INFER | TypeFlags::HAS_PLACEHOLDER | TypeFlags::HAS_FREE_REGIONS
} else {
- TypeFlags::HAS_INFER
- | TypeFlags::HAS_RE_PLACEHOLDER
- | TypeFlags::HAS_TY_PLACEHOLDER
- | TypeFlags::HAS_CT_PLACEHOLDER
+ TypeFlags::HAS_INFER | TypeFlags::HAS_PLACEHOLDER
};
// Fast path: nothing that needs to be canonicalized.
diff --git a/compiler/rustc_infer/src/infer/canonical/mod.rs b/compiler/rustc_infer/src/infer/canonical/mod.rs
index f765c41a3..8ca2e4030 100644
--- a/compiler/rustc_infer/src/infer/canonical/mod.rs
+++ b/compiler/rustc_infer/src/infer/canonical/mod.rs
@@ -25,7 +25,7 @@ use crate::infer::{ConstVariableOrigin, ConstVariableOriginKind};
use crate::infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin, TypeVariableOriginKind};
use rustc_index::IndexVec;
use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::GenericArg;
use rustc_middle::ty::{self, List, Ty, TyCtxt};
use rustc_span::source_map::Span;
@@ -88,7 +88,7 @@ impl<'tcx> InferCtxt<'tcx> {
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> CanonicalVarValues<'tcx> {
CanonicalVarValues {
- var_values: self.tcx.mk_substs_from_iter(
+ var_values: self.tcx.mk_args_from_iter(
variables
.iter()
.map(|info| self.instantiate_canonical_var(span, info, &universe_map)),
diff --git a/compiler/rustc_infer/src/infer/canonical/query_response.rs b/compiler/rustc_infer/src/infer/canonical/query_response.rs
index 9c3ab04de..ed1010821 100644
--- a/compiler/rustc_infer/src/infer/canonical/query_response.rs
+++ b/compiler/rustc_infer/src/infer/canonical/query_response.rs
@@ -25,8 +25,8 @@ use rustc_middle::arena::ArenaAllocatable;
use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::relate::TypeRelation;
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
use rustc_middle::ty::{self, BoundVar, ToPredicate, Ty, TyCtxt};
+use rustc_middle::ty::{GenericArg, GenericArgKind};
use rustc_span::{Span, Symbol};
use std::fmt::Debug;
use std::iter;
@@ -484,7 +484,7 @@ impl<'tcx> InferCtxt<'tcx> {
// given variable in the loop above, use that. Otherwise, use
// a fresh inference variable.
let result_subst = CanonicalVarValues {
- var_values: self.tcx.mk_substs_from_iter(
+ var_values: self.tcx.mk_args_from_iter(
query_response.variables.iter().enumerate().map(|(index, info)| {
if info.is_existential() {
match opt_values[BoundVar::new(index)] {
@@ -520,7 +520,7 @@ impl<'tcx> InferCtxt<'tcx> {
self.at(cause, param_env)
.eq(
DefineOpaqueTypes::Yes,
- Ty::new_opaque(self.tcx, a.def_id.to_def_id(), a.substs),
+ Ty::new_opaque(self.tcx, a.def_id.to_def_id(), a.args),
b,
)?
.obligations,
diff --git a/compiler/rustc_infer/src/infer/canonical/substitute.rs b/compiler/rustc_infer/src/infer/canonical/substitute.rs
index cac3b4072..f368b30fb 100644
--- a/compiler/rustc_infer/src/infer/canonical/substitute.rs
+++ b/compiler/rustc_infer/src/infer/canonical/substitute.rs
@@ -8,7 +8,7 @@
use crate::infer::canonical::{Canonical, CanonicalVarValues};
use rustc_middle::ty::fold::{FnMutDelegate, TypeFoldable};
-use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::GenericArgKind;
use rustc_middle::ty::{self, TyCtxt};
/// FIXME(-Ztrait-solver=next): This is only public because it is shared with the
diff --git a/compiler/rustc_infer/src/infer/combine.rs b/compiler/rustc_infer/src/infer/combine.rs
index a9cdb8c51..ddc8e7e50 100644
--- a/compiler/rustc_infer/src/infer/combine.rs
+++ b/compiler/rustc_infer/src/infer/combine.rs
@@ -177,7 +177,7 @@ impl<'tcx> InferCtxt<'tcx> {
self.tcx.check_tys_might_be_eq(canonical).map_err(|_| {
self.tcx.sess.delay_span_bug(
DUMMY_SP,
- format!("cannot relate consts of different types (a={:?}, b={:?})", a, b,),
+ format!("cannot relate consts of different types (a={a:?}, b={b:?})",),
)
})
});
@@ -254,7 +254,7 @@ impl<'tcx> InferCtxt<'tcx> {
/// in `ct` with `ct` itself.
///
/// This is especially important as unevaluated consts use their parent's generics.
- /// They therefore often contain unused substs, making these errors far more likely.
+ /// They therefore often contain unused args, making these errors far more likely.
///
/// A good example of this is the following:
///
@@ -272,12 +272,12 @@ impl<'tcx> InferCtxt<'tcx> {
/// ```
///
/// Here `3 + 4` ends up as `ConstKind::Unevaluated` which uses the generics
- /// of `fn bind` (meaning that its substs contain `N`).
+ /// of `fn bind` (meaning that its args contain `N`).
///
/// `bind(arr)` now infers that the type of `arr` must be `[u8; N]`.
/// The assignment `arr = bind(arr)` now tries to equate `N` with `3 + 4`.
///
- /// As `3 + 4` contains `N` in its substs, this must not succeed.
+ /// As `3 + 4` contains `N` in its args, this must not succeed.
///
/// See `tests/ui/const-generics/occurs-check/` for more examples where this is relevant.
#[instrument(level = "debug", skip(self))]
diff --git a/compiler/rustc_infer/src/infer/equate.rs b/compiler/rustc_infer/src/infer/equate.rs
index 495c250a7..1dbab48fd 100644
--- a/compiler/rustc_infer/src/infer/equate.rs
+++ b/compiler/rustc_infer/src/infer/equate.rs
@@ -5,7 +5,7 @@ use super::combine::{CombineFields, ObligationEmittingRelation};
use super::Subtype;
use rustc_middle::ty::relate::{self, Relate, RelateResult, TypeRelation};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::TyVar;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
@@ -43,12 +43,12 @@ impl<'tcx> TypeRelation<'tcx> for Equate<'_, '_, 'tcx> {
self.a_is_expected
}
- fn relate_item_substs(
+ fn relate_item_args(
&mut self,
_item_def_id: DefId,
- a_subst: SubstsRef<'tcx>,
- b_subst: SubstsRef<'tcx>,
- ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ a_arg: GenericArgsRef<'tcx>,
+ b_arg: GenericArgsRef<'tcx>,
+ ) -> RelateResult<'tcx, GenericArgsRef<'tcx>> {
// N.B., once we are equating types, we don't care about
// variance, so don't try to lookup the variance here. This
// also avoids some cycles (e.g., #41849) since looking up
@@ -56,7 +56,7 @@ impl<'tcx> TypeRelation<'tcx> for Equate<'_, '_, 'tcx> {
// performing trait matching (which then performs equality
// unification).
- relate::relate_substs(self, a_subst, b_subst)
+ relate::relate_args(self, a_arg, b_arg)
}
fn relate_with_variance<T: Relate<'tcx>>(
diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
index b826ced04..ac5468f3d 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/mod.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
@@ -238,7 +238,7 @@ fn msg_span_from_named_region<'tcx>(
let text = if name == kw::UnderscoreLifetime {
"the anonymous lifetime as defined here".to_string()
} else {
- format!("the lifetime `{}` as defined here", name)
+ format!("the lifetime `{name}` as defined here")
};
(text, Some(span))
}
@@ -250,7 +250,7 @@ fn msg_span_from_named_region<'tcx>(
})
),
_ => (
- format!("the lifetime `{}` as defined here", region),
+ format!("the lifetime `{region}` as defined here"),
Some(tcx.def_span(scope)),
),
}
@@ -264,11 +264,11 @@ fn msg_span_from_named_region<'tcx>(
ty::RePlaceholder(ty::PlaceholderRegion {
bound: ty::BoundRegion { kind: ty::BoundRegionKind::BrAnon(Some(span)), .. },
..
- }) => (format!("the anonymous lifetime defined here"), Some(span)),
+ }) => ("the anonymous lifetime defined here".to_owned(), Some(span)),
ty::RePlaceholder(ty::PlaceholderRegion {
bound: ty::BoundRegion { kind: ty::BoundRegionKind::BrAnon(None), .. },
..
- }) => (format!("an anonymous lifetime"), None),
+ }) => ("an anonymous lifetime".to_owned(), None),
_ => bug!("{:?}", region),
}
}
@@ -280,7 +280,7 @@ fn emit_msg_span(
span: Option<Span>,
suffix: &str,
) {
- let message = format!("{}{}{}", prefix, description, suffix);
+ let message = format!("{prefix}{description}{suffix}");
if let Some(span) = span {
err.span_note(span, message);
@@ -296,7 +296,7 @@ fn label_msg_span(
span: Option<Span>,
suffix: &str,
) {
- let message = format!("{}{}{}", prefix, description, suffix);
+ let message = format!("{prefix}{description}{suffix}");
if let Some(span) = span {
err.span_label(span, message);
@@ -315,7 +315,7 @@ pub fn unexpected_hidden_region_diagnostic<'tcx>(
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let mut err = tcx.sess.create_err(errors::OpaqueCapturesLifetime {
span,
- opaque_ty: Ty::new_opaque(tcx, opaque_ty_key.def_id.to_def_id(), opaque_ty_key.substs),
+ opaque_ty: Ty::new_opaque(tcx, opaque_ty_key.def_id.to_def_id(), opaque_ty_key.args),
opaque_ty_span: tcx.def_span(opaque_ty_key.def_id),
});
@@ -333,7 +333,7 @@ pub fn unexpected_hidden_region_diagnostic<'tcx>(
explain_free_region(
tcx,
&mut err,
- &format!("hidden type `{}` captures ", hidden_ty),
+ &format!("hidden type `{hidden_ty}` captures "),
hidden_region,
"",
);
@@ -345,12 +345,21 @@ pub fn unexpected_hidden_region_diagnostic<'tcx>(
fn_returns,
hidden_region.to_string(),
None,
- format!("captures `{}`", hidden_region),
+ format!("captures `{hidden_region}`"),
None,
Some(reg_info.def_id),
)
}
}
+ ty::RePlaceholder(_) => {
+ explain_free_region(
+ tcx,
+ &mut err,
+ &format!("hidden type `{}` captures ", hidden_ty),
+ hidden_region,
+ "",
+ );
+ }
ty::ReError(_) => {
err.delay_as_bug();
}
@@ -373,7 +382,7 @@ pub fn unexpected_hidden_region_diagnostic<'tcx>(
note_and_explain_region(
tcx,
&mut err,
- &format!("hidden type `{}` captures ", hidden_ty),
+ &format!("hidden type `{hidden_ty}` captures "),
hidden_region,
"",
None,
@@ -386,16 +395,16 @@ pub fn unexpected_hidden_region_diagnostic<'tcx>(
impl<'tcx> InferCtxt<'tcx> {
pub fn get_impl_future_output_ty(&self, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
- let (def_id, substs) = match *ty.kind() {
- ty::Alias(_, ty::AliasTy { def_id, substs, .. })
+ let (def_id, args) = match *ty.kind() {
+ ty::Alias(_, ty::AliasTy { def_id, args, .. })
if matches!(self.tcx.def_kind(def_id), DefKind::OpaqueTy) =>
{
- (def_id, substs)
+ (def_id, args)
}
- ty::Alias(_, ty::AliasTy { def_id, substs, .. })
+ ty::Alias(_, ty::AliasTy { def_id, args, .. })
if self.tcx.is_impl_trait_in_trait(def_id) =>
{
- (def_id, substs)
+ (def_id, args)
}
_ => return None,
};
@@ -403,7 +412,7 @@ impl<'tcx> InferCtxt<'tcx> {
let future_trait = self.tcx.require_lang_item(LangItem::Future, None);
let item_def_id = self.tcx.associated_item_def_ids(future_trait)[0];
- self.tcx.explicit_item_bounds(def_id).subst_iter_copied(self.tcx, substs).find_map(
+ self.tcx.explicit_item_bounds(def_id).iter_instantiated_copied(self.tcx, args).find_map(
|(predicate, _)| {
predicate
.kind()
@@ -573,7 +582,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
use hir::def_id::CrateNum;
use rustc_hir::definitions::DisambiguatedDefPathData;
use ty::print::Printer;
- use ty::subst::GenericArg;
+ use ty::GenericArg;
struct AbsolutePathPrinter<'tcx> {
tcx: TyCtxt<'tcx>,
@@ -711,12 +720,12 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
{
// don't show type `_`
if span.desugaring_kind() == Some(DesugaringKind::ForLoop)
- && let ty::Adt(def, substs) = ty.kind()
+ && let ty::Adt(def, args) = ty.kind()
&& Some(def.did()) == self.tcx.get_diagnostic_item(sym::Option)
{
- err.span_label(span, format!("this is an iterator with items of type `{}`", substs.type_at(0)));
+ err.span_label(span, format!("this is an iterator with items of type `{}`", args.type_at(0)));
} else {
- err.span_label(span, format!("this expression has type `{}`", ty));
+ err.span_label(span, format!("this expression has type `{ty}`"));
}
}
if let Some(ty::error::ExpectedFound { found, .. }) = exp_found
@@ -726,7 +735,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
err.span_suggestion(
span,
"consider dereferencing the boxed value",
- format!("*{}", snippet),
+ format!("*{snippet}"),
Applicability::MachineApplicable,
);
}
@@ -734,6 +743,35 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
ObligationCauseCode::Pattern { origin_expr: false, span: Some(span), .. } => {
err.span_label(span, "expected due to this");
}
+ ObligationCauseCode::BlockTailExpression(
+ _,
+ hir::MatchSource::TryDesugar(scrut_hir_id),
+ ) => {
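+                // If the scrutinee of the `?` already has the expected type, the `?` is
+                // what introduces the mismatch, so suggest removing it.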
+ if let Some(ty::error::ExpectedFound { expected, .. }) = exp_found {
+ let scrut_expr = self.tcx.hir().expect_expr(scrut_hir_id);
+ let scrut_ty = if let hir::ExprKind::Call(_, args) = &scrut_expr.kind {
+ let arg_expr = args.first().expect("try desugaring call w/out arg");
+ self.typeck_results.as_ref().and_then(|typeck_results| {
+ typeck_results.expr_ty_opt(arg_expr)
+ })
+ } else {
+ bug!("try desugaring w/out call expr as scrutinee");
+ };
+
+ match scrut_ty {
+ Some(ty) if expected == ty => {
+ let source_map = self.tcx.sess.source_map();
+ err.span_suggestion(
+ source_map.end_point(cause.span()),
+ "try removing this `?`",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ _ => {}
+ }
+ }
+ },
ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
arm_block_id,
arm_span,
@@ -743,12 +781,11 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
prior_arm_ty,
source,
ref prior_arms,
- scrut_hir_id,
opt_suggest_box_span,
scrut_span,
..
}) => match source {
- hir::MatchSource::TryDesugar => {
+ hir::MatchSource::TryDesugar(scrut_hir_id) => {
if let Some(ty::error::ExpectedFound { expected, .. }) = exp_found {
let scrut_expr = self.tcx.hir().expect_expr(scrut_hir_id);
let scrut_ty = if let hir::ExprKind::Call(_, args) = &scrut_expr.kind {
@@ -764,7 +801,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
Some(ty) if expected == ty => {
let source_map = self.tcx.sess.source_map();
err.span_suggestion(
- source_map.end_point(cause.span),
+ source_map.end_point(cause.span()),
"try removing this `?`",
"",
Applicability::MachineApplicable,
@@ -785,13 +822,13 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
if prior_arms.len() <= 4 {
for sp in prior_arms {
any_multiline_arm |= source_map.is_multiline(*sp);
- err.span_label(*sp, format!("this is found to be of type `{}`", t));
+ err.span_label(*sp, format!("this is found to be of type `{t}`"));
}
} else if let Some(sp) = prior_arms.last() {
any_multiline_arm |= source_map.is_multiline(*sp);
err.span_label(
*sp,
- format!("this and all prior arms are found to be of type `{}`", t),
+ format!("this and all prior arms are found to be of type `{t}`"),
);
}
let outer = if any_multiline_arm || !source_map.is_multiline(cause.span) {
@@ -908,7 +945,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
value: &mut DiagnosticStyledString,
other_value: &mut DiagnosticStyledString,
name: String,
- sub: ty::subst::SubstsRef<'tcx>,
+ sub: ty::GenericArgsRef<'tcx>,
pos: usize,
other_ty: Ty<'tcx>,
) {
@@ -986,9 +1023,9 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
other_path: String,
other_ty: Ty<'tcx>,
) -> Option<()> {
- // FIXME/HACK: Go back to `SubstsRef` to use its inherent methods,
+ // FIXME/HACK: Go back to `GenericArgsRef` to use its inherent methods,
// ideally that shouldn't be necessary.
- let sub = self.tcx.mk_substs(sub);
+ let sub = self.tcx.mk_args(sub);
for (i, ta) in sub.types().enumerate() {
if ta == other_ty {
self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, other_ty);
@@ -1180,9 +1217,9 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
let did1 = def1.did();
let did2 = def2.did();
let sub_no_defaults_1 =
- self.tcx.generics_of(did1).own_substs_no_defaults(self.tcx, sub1);
+ self.tcx.generics_of(did1).own_args_no_defaults(self.tcx, sub1);
let sub_no_defaults_2 =
- self.tcx.generics_of(did2).own_substs_no_defaults(self.tcx, sub2);
+ self.tcx.generics_of(did2).own_args_no_defaults(self.tcx, sub2);
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
let path1 = self.tcx.def_path_str(did1);
let path2 = self.tcx.def_path_str(did2);
@@ -1403,11 +1440,11 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
}
// When encountering tuples of the same size, highlight only the differing types
- (&ty::Tuple(substs1), &ty::Tuple(substs2)) if substs1.len() == substs2.len() => {
+ (&ty::Tuple(args1), &ty::Tuple(args2)) if args1.len() == args2.len() => {
let mut values =
(DiagnosticStyledString::normal("("), DiagnosticStyledString::normal("("));
- let len = substs1.len();
- for (i, (left, right)) in substs1.iter().zip(substs2).enumerate() {
+ let len = args1.len();
+ for (i, (left, right)) in args1.iter().zip(args2).enumerate() {
let (x1, x2) = self.cmp(left, right);
(values.0).0.extend(x1.0);
(values.1).0.extend(x2.0);
@@ -1423,35 +1460,34 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
values
}
- (ty::FnDef(did1, substs1), ty::FnDef(did2, substs2)) => {
- let sig1 = self.tcx.fn_sig(*did1).subst(self.tcx, substs1);
- let sig2 = self.tcx.fn_sig(*did2).subst(self.tcx, substs2);
+ (ty::FnDef(did1, args1), ty::FnDef(did2, args2)) => {
+ let sig1 = self.tcx.fn_sig(*did1).instantiate(self.tcx, args1);
+ let sig2 = self.tcx.fn_sig(*did2).instantiate(self.tcx, args2);
let mut values = self.cmp_fn_sig(&sig1, &sig2);
- let path1 = format!(" {{{}}}", self.tcx.def_path_str_with_substs(*did1, substs1));
- let path2 = format!(" {{{}}}", self.tcx.def_path_str_with_substs(*did2, substs2));
+ let path1 = format!(" {{{}}}", self.tcx.def_path_str_with_args(*did1, args1));
+ let path2 = format!(" {{{}}}", self.tcx.def_path_str_with_args(*did2, args2));
let same_path = path1 == path2;
values.0.push(path1, !same_path);
values.1.push(path2, !same_path);
values
}
- (ty::FnDef(did1, substs1), ty::FnPtr(sig2)) => {
- let sig1 = self.tcx.fn_sig(*did1).subst(self.tcx, substs1);
+ (ty::FnDef(did1, args1), ty::FnPtr(sig2)) => {
+ let sig1 = self.tcx.fn_sig(*did1).instantiate(self.tcx, args1);
let mut values = self.cmp_fn_sig(&sig1, sig2);
values.0.push_highlighted(format!(
" {{{}}}",
- self.tcx.def_path_str_with_substs(*did1, substs1)
+ self.tcx.def_path_str_with_args(*did1, args1)
));
values
}
- (ty::FnPtr(sig1), ty::FnDef(did2, substs2)) => {
- let sig2 = self.tcx.fn_sig(*did2).subst(self.tcx, substs2);
+ (ty::FnPtr(sig1), ty::FnDef(did2, args2)) => {
+ let sig2 = self.tcx.fn_sig(*did2).instantiate(self.tcx, args2);
let mut values = self.cmp_fn_sig(sig1, &sig2);
- values.1.push_normal(format!(
- " {{{}}}",
- self.tcx.def_path_str_with_substs(*did2, substs2)
- ));
+ values
+ .1
+ .push_normal(format!(" {{{}}}", self.tcx.def_path_str_with_args(*did2, args2)));
values
}
@@ -1636,6 +1672,12 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
(false, Mismatch::Fixed(self.tcx.def_descr(expected.def_id)))
}
ValuePairs::Regions(_) => (false, Mismatch::Fixed("lifetime")),
+ ValuePairs::ExistentialTraitRef(_) => {
+ (false, Mismatch::Fixed("existential trait ref"))
+ }
+ ValuePairs::ExistentialProjection(_) => {
+ (false, Mismatch::Fixed("existential projection"))
+ }
};
let Some(vals) = self.values_str(values) else {
// Derived error. Cancel the emitter.
@@ -1662,7 +1704,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
..
})) = values
{
- Cow::from(format!("expected this to be `{}`", expected))
+ Cow::from(format!("expected this to be `{expected}`"))
} else {
terr.to_string(self.tcx)
};
@@ -1913,7 +1955,12 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
true
};
- if should_suggest_fixes {
+ // FIXME(#73154): For now, we do leak check when coercing function
+ // pointers in typeck, instead of only during borrowck. This can lead
+ // to these `RegionsInsufficientlyPolymorphic` errors that aren't helpful.
+ if should_suggest_fixes
+ && !matches!(terr, TypeError::RegionsInsufficientlyPolymorphic(..))
+ {
self.suggest_tuple_pattern(cause, &exp_found, diag);
self.suggest_accessing_field_where_appropriate(cause, &exp_found, diag);
self.suggest_await_on_expect_found(cause, span, &exp_found, diag);
@@ -1959,7 +2006,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
trace: &TypeTrace<'tcx>,
terr: TypeError<'tcx>,
) -> Vec<TypeErrorAdditionalDiags> {
- use crate::traits::ObligationCauseCode::MatchExpressionArm;
+ use crate::traits::ObligationCauseCode::{BlockTailExpression, MatchExpressionArm};
let mut suggestions = Vec::new();
let span = trace.cause.span();
let values = self.resolve_vars_if_possible(trace.values);
@@ -1977,11 +2024,17 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
// specify a byte literal
(ty::Uint(ty::UintTy::U8), ty::Char) => {
if let Ok(code) = self.tcx.sess().source_map().span_to_snippet(span)
- && let Some(code) = code.strip_prefix('\'').and_then(|s| s.strip_suffix('\''))
- && !code.starts_with("\\u") // forbid all Unicode escapes
- && code.chars().next().is_some_and(|c| c.is_ascii()) // forbids literal Unicode characters beyond ASCII
+ && let Some(code) =
+ code.strip_prefix('\'').and_then(|s| s.strip_suffix('\''))
+ // forbid all Unicode escapes
+ && !code.starts_with("\\u")
+ // forbids literal Unicode characters beyond ASCII
+ && code.chars().next().is_some_and(|c| c.is_ascii())
{
- suggestions.push(TypeErrorAdditionalDiags::MeantByteLiteral { span, code: escape_literal(code) })
+ suggestions.push(TypeErrorAdditionalDiags::MeantByteLiteral {
+ span,
+ code: escape_literal(code),
+ })
}
}
// If a character was expected and the found expression is a string literal
@@ -1992,7 +2045,10 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
&& let Some(code) = code.strip_prefix('"').and_then(|s| s.strip_suffix('"'))
&& code.chars().count() == 1
{
- suggestions.push(TypeErrorAdditionalDiags::MeantCharLiteral { span, code: escape_literal(code) })
+ suggestions.push(TypeErrorAdditionalDiags::MeantCharLiteral {
+ span,
+ code: escape_literal(code),
+ })
}
}
// If a string was expected and the found expression is a character literal,
@@ -2002,7 +2058,10 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
if let Some(code) =
code.strip_prefix('\'').and_then(|s| s.strip_suffix('\''))
{
- suggestions.push(TypeErrorAdditionalDiags::MeantStrLiteral { span, code: escape_literal(code) })
+ suggestions.push(TypeErrorAdditionalDiags::MeantStrLiteral {
+ span,
+ code: escape_literal(code),
+ })
}
}
}
@@ -2011,17 +2070,24 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
(ty::Bool, ty::Tuple(list)) => if list.len() == 0 {
suggestions.extend(self.suggest_let_for_letchains(&trace.cause, span));
}
- (ty::Array(_, _), ty::Array(_, _)) => suggestions.extend(self.suggest_specify_actual_length(terr, trace, span)),
+ (ty::Array(_, _), ty::Array(_, _)) => {
+ suggestions.extend(self.suggest_specify_actual_length(terr, trace, span))
+ }
_ => {}
}
}
let code = trace.cause.code();
- if let &MatchExpressionArm(box MatchExpressionArmCause { source, .. }) = code
- && let hir::MatchSource::TryDesugar = source
- && let Some((expected_ty, found_ty, _, _)) = self.values_str(trace.values)
- {
- suggestions.push(TypeErrorAdditionalDiags::TryCannotConvert { found: found_ty.content(), expected: expected_ty.content() });
- }
+ if let &(MatchExpressionArm(box MatchExpressionArmCause { source, .. })
+ | BlockTailExpression(.., source)
+ ) = code
+ && let hir::MatchSource::TryDesugar(_) = source
+ && let Some((expected_ty, found_ty, _, _)) = self.values_str(trace.values)
+ {
+ suggestions.push(TypeErrorAdditionalDiags::TryCannotConvert {
+ found: found_ty.content(),
+ expected: expected_ty.content(),
+ });
+ }
suggestions
}
@@ -2069,7 +2135,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
visitor.visit_body(body);
visitor.result.map(|r| &r.peel_refs().kind)
}
- Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(ty, _), .. })) => {
+ Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(ty, _, _), .. })) => {
Some(&ty.peel_refs().kind)
}
_ => None,
@@ -2109,14 +2175,13 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
found: Ty<'tcx>,
expected_fields: &List<Ty<'tcx>>,
) -> Option<TypeErrorAdditionalDiags> {
- let [expected_tup_elem] = expected_fields[..] else { return None};
+ let [expected_tup_elem] = expected_fields[..] else { return None };
if !self.same_type_modulo_infer(expected_tup_elem, found) {
return None;
}
- let Ok(code) = self.tcx.sess().source_map().span_to_snippet(span)
- else { return None };
+ let Ok(code) = self.tcx.sess().source_map().span_to_snippet(span) else { return None };
let sugg = if code.starts_with('(') && code.ends_with(')') {
let before_close = span.hi() - BytePos::from_u32(1);
@@ -2141,6 +2206,8 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
infer::Regions(exp_found) => self.expected_found_str(exp_found),
infer::Terms(exp_found) => self.expected_found_str_term(exp_found),
infer::Aliases(exp_found) => self.expected_found_str(exp_found),
+ infer::ExistentialTraitRef(exp_found) => self.expected_found_str(exp_found),
+ infer::ExistentialProjection(exp_found) => self.expected_found_str(exp_found),
infer::TraitRefs(exp_found) => {
let pretty_exp_found = ty::error::ExpectedFound {
expected: exp_found.expected.print_only_trait_path(),
@@ -2356,7 +2423,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
if let Ok(snip) = self.tcx.sess.source_map().span_to_next_source(p.span)
&& snip.starts_with(' ')
{
- format!("{new_lt}")
+ new_lt.to_string()
} else {
format!("{new_lt} ")
}
@@ -2370,13 +2437,13 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
}
let labeled_user_string = match bound_kind {
- GenericKind::Param(ref p) => format!("the parameter type `{}`", p),
+ GenericKind::Param(ref p) => format!("the parameter type `{p}`"),
GenericKind::Alias(ref p) => match p.kind(self.tcx) {
ty::AliasKind::Projection | ty::AliasKind::Inherent => {
- format!("the associated type `{}`", p)
+ format!("the associated type `{p}`")
}
- ty::AliasKind::Weak => format!("the type alias `{}`", p),
- ty::AliasKind::Opaque => format!("the opaque type `{}`", p),
+ ty::AliasKind::Weak => format!("the type alias `{p}`"),
+ ty::AliasKind::Opaque => format!("the opaque type `{p}`"),
},
};
@@ -2390,7 +2457,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
span,
impl_item_def_id,
trait_item_def_id,
- &format!("`{}: {}`", bound_kind, sub),
+ &format!("`{bound_kind}: {sub}`"),
);
}
@@ -2404,7 +2471,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
let msg = "consider adding an explicit lifetime bound";
if let Some((sp, has_lifetimes)) = type_param_span {
let suggestion =
- if has_lifetimes { format!(" + {}", sub) } else { format!(": {}", sub) };
+ if has_lifetimes { format!(" + {sub}") } else { format!(": {sub}") };
let mut suggestions = vec![(sp, suggestion)];
for add_lt_sugg in add_lt_suggs.into_iter().flatten() {
suggestions.push(add_lt_sugg);
@@ -2415,7 +2482,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
Applicability::MaybeIncorrect, // Issue #41966
);
} else {
- let consider = format!("{} `{}: {}`...", msg, bound_kind, sub);
+ let consider = format!("{msg} `{bound_kind}: {sub}`...");
err.help(consider);
}
}
@@ -2424,13 +2491,10 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
|err: &mut Diagnostic, type_param_span: Option<(Span, bool)>| {
let msg = "consider introducing an explicit lifetime bound";
if let Some((sp, has_lifetimes)) = type_param_span {
- let suggestion = if has_lifetimes {
- format!(" + {}", new_lt)
- } else {
- format!(": {}", new_lt)
- };
+ let suggestion =
+ if has_lifetimes { format!(" + {new_lt}") } else { format!(": {new_lt}") };
let mut sugg =
- vec![(sp, suggestion), (span.shrink_to_hi(), format!(" + {}", new_lt))];
+ vec![(sp, suggestion), (span.shrink_to_hi(), format!(" + {new_lt}"))];
for lt in add_lt_suggs.clone().into_iter().flatten() {
sugg.push(lt);
sugg.rotate_right(1);
@@ -2510,7 +2574,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
"{} may not live long enough",
labeled_user_string
);
- let pred = format!("{}: {}", bound_kind, sub);
+ let pred = format!("{bound_kind}: {sub}");
let suggestion = format!("{} {}", generics.add_where_or_trailing_comma(), pred,);
err.span_suggestion(
generics.tail_span_for_predicate_suggestion(),
@@ -2566,21 +2630,19 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
note_and_explain_region(
self.tcx,
&mut err,
- &format!("{} must be valid for ", labeled_user_string),
+ &format!("{labeled_user_string} must be valid for "),
sub,
"...",
None,
);
if let Some(infer::RelateParamBound(_, t, _)) = origin {
- let return_impl_trait =
- self.tcx.return_type_impl_trait(generic_param_scope).is_some();
let t = self.resolve_vars_if_possible(t);
match t.kind() {
// We've got:
// fn get_later<G, T>(g: G, dest: &mut T) -> impl FnOnce() + '_
// suggest:
// fn get_later<'a, G: 'a, T>(g: G, dest: &mut T) -> impl FnOnce() + '_ + 'a
- ty::Closure(..) | ty::Alias(ty::Opaque, ..) if return_impl_trait => {
+ ty::Closure(..) | ty::Alias(ty::Opaque, ..) => {
new_binding_suggestion(&mut err, type_param_span);
}
_ => {
@@ -2816,10 +2878,10 @@ impl<'tcx> InferCtxt<'tcx> {
br_string(br),
self.tcx.associated_item(def_id).name
),
- infer::EarlyBoundRegion(_, name) => format!(" for lifetime parameter `{}`", name),
+ infer::EarlyBoundRegion(_, name) => format!(" for lifetime parameter `{name}`"),
infer::UpvarRegion(ref upvar_id, _) => {
let var_name = self.tcx.hir().name(upvar_id.var_path.hir_id);
- format!(" for capture of `{}` by closure", var_name)
+ format!(" for capture of `{var_name}` by closure")
}
infer::Nll(..) => bug!("NLL variable found in lexical phase"),
};
@@ -2895,8 +2957,11 @@ impl<'tcx> ObligationCauseExt<'tcx> for ObligationCause<'tcx> {
CompareImplItemObligation { kind: ty::AssocKind::Const, .. } => {
ObligationCauseFailureCode::ConstCompat { span, subdiags }
}
+ BlockTailExpression(.., hir::MatchSource::TryDesugar(_)) => {
+ ObligationCauseFailureCode::TryCompat { span, subdiags }
+ }
MatchExpressionArm(box MatchExpressionArmCause { source, .. }) => match source {
- hir::MatchSource::TryDesugar => {
+ hir::MatchSource::TryDesugar(_) => {
ObligationCauseFailureCode::TryCompat { span, subdiags }
}
_ => ObligationCauseFailureCode::MatchCompat { span, subdiags },
diff --git a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
index bb75ecc6a..f2a3c47bd 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
@@ -18,7 +18,7 @@ use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKin
use rustc_middle::ty::adjustment::{Adjust, Adjustment, AutoBorrow};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Print, Printer};
use rustc_middle::ty::{self, InferConst};
-use rustc_middle::ty::{GenericArg, GenericArgKind, SubstsRef};
+use rustc_middle::ty::{GenericArg, GenericArgKind, GenericArgsRef};
use rustc_middle::ty::{IsSuggestable, Ty, TyCtxt, TypeckResults};
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::{BytePos, Span};
@@ -162,10 +162,18 @@ fn fmt_printer<'a, 'tcx>(infcx: &'a InferCtxt<'tcx>, ns: Namespace) -> FmtPrinte
let mut infcx_inner = infcx.inner.borrow_mut();
let ty_vars = infcx_inner.type_variables();
let var_origin = ty_vars.var_origin(ty_vid);
- if let TypeVariableOriginKind::TypeParameterDefinition(name, _) = var_origin.kind
- && !var_origin.span.from_expansion()
+ if let TypeVariableOriginKind::TypeParameterDefinition(name, def_id) = var_origin.kind
+ && name != kw::SelfUpper && !var_origin.span.from_expansion()
{
- Some(name)
+ let generics = infcx.tcx.generics_of(infcx.tcx.parent(def_id));
+ let idx = generics.param_def_id_to_index(infcx.tcx, def_id).unwrap();
+ let generic_param_def = generics.param_at(idx as usize, infcx.tcx);
+ if let ty::GenericParamDefKind::Type { synthetic: true, .. } = generic_param_def.kind
+ {
+ None
+ } else {
+ Some(name)
+ }
} else {
None
}
@@ -218,8 +226,8 @@ fn ty_to_string<'tcx>(
/// something users are familiar with. Directly printing the `fn_sig` of closures also
/// doesn't work as they actually use the "rust-call" API.
fn closure_as_fn_str<'tcx>(infcx: &InferCtxt<'tcx>, ty: Ty<'tcx>) -> String {
- let ty::Closure(_, substs) = ty.kind() else { unreachable!() };
- let fn_sig = substs.as_closure().sig();
+ let ty::Closure(_, args) = ty.kind() else { unreachable!() };
+ let fn_sig = args.as_closure().sig();
let args = fn_sig
.inputs()
.skip_binder()
@@ -238,7 +246,7 @@ fn closure_as_fn_str<'tcx>(infcx: &InferCtxt<'tcx>, ty: Ty<'tcx>) -> String {
} else {
format!(" -> {}", ty_to_string(infcx, fn_sig.output().skip_binder(), None))
};
- format!("fn({}){}", args, ret)
+ format!("fn({args}){ret}")
}
impl<'tcx> InferCtxt<'tcx> {
@@ -411,7 +419,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
}
let Some(InferSource { span, kind }) = local_visitor.infer_source else {
- return self.bad_inference_failure_err(failure_span, arg_data, error_code)
+ return self.bad_inference_failure_err(failure_span, arg_data, error_code);
};
let (source_kind, name) = kind.ty_localized_msg(self);
@@ -516,9 +524,9 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
});
}
}
- InferSourceKind::FullyQualifiedMethodCall { receiver, successor, substs, def_id } => {
+ InferSourceKind::FullyQualifiedMethodCall { receiver, successor, args, def_id } => {
let printer = fmt_printer(self, Namespace::ValueNS);
- let def_path = printer.print_def_path(def_id, substs).unwrap().into_buffer();
+ let def_path = printer.print_def_path(def_id, args).unwrap().into_buffer();
// We only care about whether we have to add `&` or `&mut ` for now.
// This is the case if the last adjustment is a borrow and the
@@ -651,7 +659,7 @@ enum InferSourceKind<'tcx> {
/// If the method has other arguments, this is ", " and the start of the first argument,
/// while for methods without arguments this is ")" and the end of the method call.
successor: (&'static str, BytePos),
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
def_id: DefId,
},
ClosureReturn {
@@ -702,7 +710,7 @@ impl<'tcx> InferSourceKind<'tcx> {
#[derive(Debug)]
struct InsertableGenericArgs<'tcx> {
insert_span: Span,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
generics_def_id: DefId,
def_id: DefId,
have_turbofish: bool,
@@ -766,11 +774,11 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
ty::Closure(..) => 1000,
ty::FnDef(..) => 150,
ty::FnPtr(..) => 30,
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
5 + self
.tcx
.generics_of(def.did())
- .own_substs_no_defaults(self.tcx, substs)
+ .own_args_no_defaults(self.tcx, args)
.iter()
.map(|&arg| self.arg_cost(arg))
.sum::<usize>()
@@ -797,8 +805,8 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
};
variant_cost + generic_args.iter().map(|&arg| ctx.arg_cost(arg)).sum::<usize>()
}
- InferSourceKind::FullyQualifiedMethodCall { substs, .. } => {
- 20 + substs.iter().map(|arg| ctx.arg_cost(arg)).sum::<usize>()
+ InferSourceKind::FullyQualifiedMethodCall { args, .. } => {
+ 20 + args.iter().map(|arg| ctx.arg_cost(arg)).sum::<usize>()
}
InferSourceKind::ClosureReturn { ty, should_wrap_expr, .. } => {
30 + ctx.ty_cost(ty) + if should_wrap_expr.is_some() { 10 } else { 0 }
@@ -832,9 +840,9 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
}
}
- fn node_substs_opt(&self, hir_id: HirId) -> Option<SubstsRef<'tcx>> {
- let substs = self.typeck_results.node_substs_opt(hir_id);
- self.infcx.resolve_vars_if_possible(substs)
+ fn node_args_opt(&self, hir_id: HirId) -> Option<GenericArgsRef<'tcx>> {
+ let args = self.typeck_results.node_args_opt(hir_id);
+ self.infcx.resolve_vars_if_possible(args)
}
fn opt_node_type(&self, hir_id: HirId) -> Option<Ty<'tcx>> {
@@ -915,15 +923,15 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
false
}
- fn expr_inferred_subst_iter(
+ fn expr_inferred_arg_iter(
&self,
expr: &'tcx hir::Expr<'tcx>,
) -> Box<dyn Iterator<Item = InsertableGenericArgs<'tcx>> + 'a> {
let tcx = self.infcx.tcx;
match expr.kind {
hir::ExprKind::Path(ref path) => {
- if let Some(substs) = self.node_substs_opt(expr.hir_id) {
- return self.path_inferred_subst_iter(expr.hir_id, substs, path);
+ if let Some(args) = self.node_args_opt(expr.hir_id) {
+ return self.path_inferred_arg_iter(expr.hir_id, args, path);
}
}
// FIXME(#98711): Ideally we would also deal with type relative
@@ -935,7 +943,7 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
// However, the `type_dependent_def_id` for `Self::Output` in an
// impl is currently the `DefId` of `Output` in the trait definition
// which makes this somewhat difficult and prevents us from just
- // using `self.path_inferred_subst_iter` here.
+ // using `self.path_inferred_arg_iter` here.
hir::ExprKind::Struct(&hir::QPath::Resolved(_self_ty, path), _, _)
// FIXME(TaKO8Ki): Ideally we should support this. For that
// we have to map back from the self type to the
@@ -943,11 +951,11 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
//
// See the `need_type_info/issue-103053.rs` test for
// a example.
- if !matches!(path.res, Res::Def(DefKind::TyAlias, _)) => {
+ if !matches!(path.res, Res::Def(DefKind::TyAlias { .. }, _)) => {
if let Some(ty) = self.opt_node_type(expr.hir_id)
- && let ty::Adt(_, substs) = ty.kind()
+ && let ty::Adt(_, args) = ty.kind()
{
- return Box::new(self.resolved_path_inferred_subst_iter(path, substs));
+ return Box::new(self.resolved_path_inferred_arg_iter(path, args));
}
}
hir::ExprKind::MethodCall(segment, ..) => {
@@ -957,12 +965,12 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
if generics.has_impl_trait() {
None?
}
- let substs = self.node_substs_opt(expr.hir_id)?;
+ let args = self.node_args_opt(expr.hir_id)?;
let span = tcx.hir().span(segment.hir_id);
let insert_span = segment.ident.span.shrink_to_hi().with_hi(span.hi());
InsertableGenericArgs {
insert_span,
- substs,
+ args,
generics_def_id: def_id,
def_id,
have_turbofish: false,
@@ -977,10 +985,10 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
Box::new(iter::empty())
}
- fn resolved_path_inferred_subst_iter(
+ fn resolved_path_inferred_arg_iter(
&self,
path: &'tcx hir::Path<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> impl Iterator<Item = InsertableGenericArgs<'tcx>> + 'a {
let tcx = self.infcx.tcx;
let have_turbofish = path.segments.iter().any(|segment| {
@@ -1001,7 +1009,7 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
path.segments.last().unwrap().ident.span.shrink_to_hi().with_hi(path.span.hi());
InsertableGenericArgs {
insert_span,
- substs,
+ args,
generics_def_id,
def_id: path.res.def_id(),
have_turbofish,
@@ -1021,7 +1029,7 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
let insert_span = segment.ident.span.shrink_to_hi().with_hi(span.hi());
Some(InsertableGenericArgs {
insert_span,
- substs,
+ args,
generics_def_id,
def_id: res.def_id(),
have_turbofish,
@@ -1030,16 +1038,16 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
.chain(last_segment_using_path_data)
}
- fn path_inferred_subst_iter(
+ fn path_inferred_arg_iter(
&self,
hir_id: HirId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
qpath: &'tcx hir::QPath<'tcx>,
) -> Box<dyn Iterator<Item = InsertableGenericArgs<'tcx>> + 'a> {
let tcx = self.infcx.tcx;
match qpath {
hir::QPath::Resolved(_self_ty, path) => {
- Box::new(self.resolved_path_inferred_subst_iter(path, substs))
+ Box::new(self.resolved_path_inferred_arg_iter(path, args))
}
hir::QPath::TypeRelative(ty, segment) => {
let Some(def_id) = self.typeck_results.type_dependent_def_id(hir_id) else {
@@ -1055,7 +1063,7 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
let insert_span = segment.ident.span.shrink_to_hi().with_hi(span.hi());
InsertableGenericArgs {
insert_span,
- substs,
+ args,
generics_def_id: def_id,
def_id,
have_turbofish: false,
@@ -1064,15 +1072,15 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
let parent_def_id = generics.parent.unwrap();
if let DefKind::Impl { .. } = tcx.def_kind(parent_def_id) {
- let parent_ty = tcx.type_of(parent_def_id).subst(tcx, substs);
+ let parent_ty = tcx.type_of(parent_def_id).instantiate(tcx, args);
match (parent_ty.kind(), &ty.kind) {
(
- ty::Adt(def, substs),
+ ty::Adt(def, args),
hir::TyKind::Path(hir::QPath::Resolved(_self_ty, path)),
) => {
if tcx.res_generics_def_id(path.res) != Some(def.did()) {
match path.res {
- Res::Def(DefKind::TyAlias, _) => {
+ Res::Def(DefKind::TyAlias { .. }, _) => {
// FIXME: Ideally we should support this. For that
// we have to map back from the self type to the
// type alias though. That's difficult.
@@ -1084,14 +1092,13 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
// so there's nothing for us to do here.
Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } => {}
_ => warn!(
- "unexpected path: def={:?} substs={:?} path={:?}",
- def, substs, path,
+ "unexpected path: def={:?} args={:?} path={:?}",
+ def, args, path,
),
}
} else {
return Box::new(
- self.resolved_path_inferred_subst_iter(path, substs)
- .chain(segment),
+ self.resolved_path_inferred_arg_iter(path, args).chain(segment),
);
}
}
@@ -1149,9 +1156,7 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
continue;
}
- let Some(param_ty) = self.opt_node_type(param.hir_id) else {
- continue
- };
+ let Some(param_ty) = self.opt_node_type(param.hir_id) else { continue };
if self.generic_arg_contains_target(param_ty.into()) {
self.update_infer_source(InferSource {
@@ -1181,27 +1186,27 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
_ => intravisit::walk_expr(self, expr),
}
- for args in self.expr_inferred_subst_iter(expr) {
+ for args in self.expr_inferred_arg_iter(expr) {
debug!(?args);
let InsertableGenericArgs {
insert_span,
- substs,
+ args,
generics_def_id,
def_id,
have_turbofish,
} = args;
let generics = tcx.generics_of(generics_def_id);
if let Some(mut argument_index) = generics
- .own_substs(substs)
+ .own_args(args)
.iter()
.position(|&arg| self.generic_arg_contains_target(arg))
{
if generics.parent.is_none() && generics.has_self {
argument_index += 1;
}
- let substs = self.infcx.resolve_vars_if_possible(substs);
- let generic_args = &generics.own_substs_no_defaults(tcx, substs)
- [generics.own_counts().lifetimes..];
+ let args = self.infcx.resolve_vars_if_possible(args);
+ let generic_args =
+ &generics.own_args_no_defaults(tcx, args)[generics.own_counts().lifetimes..];
let span = match expr.kind {
ExprKind::MethodCall(path, ..) => path.ident.span,
_ => expr.span,
@@ -1224,10 +1229,10 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
if let Some(node_ty) = self.opt_node_type(expr.hir_id) {
if let (
&ExprKind::Closure(&Closure { fn_decl, body, fn_decl_span, .. }),
- ty::Closure(_, substs),
+ ty::Closure(_, args),
) = (&expr.kind, node_ty.kind())
{
- let output = substs.as_closure().sig().output().skip_binder();
+ let output = args.as_closure().sig().output().skip_binder();
if self.generic_arg_contains_target(output.into()) {
let body = self.infcx.tcx.hir().body(body);
let should_wrap_expr = if matches!(body.value.kind, ExprKind::Block(..)) {
@@ -1253,22 +1258,22 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
})
.any(|generics| generics.has_impl_trait())
};
- if let ExprKind::MethodCall(path, receiver, args, span) = expr.kind
- && let Some(substs) = self.node_substs_opt(expr.hir_id)
- && substs.iter().any(|arg| self.generic_arg_contains_target(arg))
+ if let ExprKind::MethodCall(path, receiver, method_args, span) = expr.kind
+ && let Some(args) = self.node_args_opt(expr.hir_id)
+ && args.iter().any(|arg| self.generic_arg_contains_target(arg))
&& let Some(def_id) = self.typeck_results.type_dependent_def_id(expr.hir_id)
&& self.infcx.tcx.trait_of_item(def_id).is_some()
&& !has_impl_trait(def_id)
{
let successor =
- args.get(0).map_or_else(|| (")", span.hi()), |arg| (", ", arg.span.lo()));
- let substs = self.infcx.resolve_vars_if_possible(substs);
+ method_args.get(0).map_or_else(|| (")", span.hi()), |arg| (", ", arg.span.lo()));
+ let args = self.infcx.resolve_vars_if_possible(args);
self.update_infer_source(InferSource {
span: path.ident.span,
kind: InferSourceKind::FullyQualifiedMethodCall {
receiver,
successor,
- substs,
+ args,
def_id,
}
})
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs
index 2c63a3904..6901955af 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs
@@ -38,8 +38,9 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let ObligationCauseCode::MatchImpl(parent, impl_def_id) = code else {
return None;
};
- let (ObligationCauseCode::BindingObligation(_, binding_span) | ObligationCauseCode::ExprBindingObligation(_, binding_span, ..))
- = *parent.code() else {
+ let (ObligationCauseCode::BindingObligation(_, binding_span)
+ | ObligationCauseCode::ExprBindingObligation(_, binding_span, ..)) = *parent.code()
+ else {
return None;
};
@@ -67,12 +68,13 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let hir::Node::Item(hir::Item {
kind: hir::ItemKind::Impl(hir::Impl { self_ty: impl_self_ty, .. }),
..
- }) = impl_node else {
+ }) = impl_node
+ else {
bug!("Node not an impl.");
};
// Next, let's figure out the set of trait objects with implicit static bounds
- let ty = self.tcx().type_of(*impl_def_id).subst_identity();
+ let ty = self.tcx().type_of(*impl_def_id).instantiate_identity();
let mut v = super::static_impl_trait::TraitObjectVisitor(FxIndexSet::default());
v.visit_ty(ty);
let mut traits = vec![];
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
index 4e13ec902..07f04ec1e 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
@@ -29,25 +29,15 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
// version new_ty of its type where the anonymous region is replaced
// with the named one.
let (named, anon, anon_param_info, region_info) = if sub.has_name()
- && self.tcx().is_suitable_region(sup).is_some()
- && self.find_param_with_region(sup, sub).is_some()
+ && let Some(region_info) = self.tcx().is_suitable_region(sup)
+ && let Some(anon_param_info) = self.find_param_with_region(sup, sub)
{
- (
- sub,
- sup,
- self.find_param_with_region(sup, sub).unwrap(),
- self.tcx().is_suitable_region(sup).unwrap(),
- )
+ (sub, sup, anon_param_info, region_info)
} else if sup.has_name()
- && self.tcx().is_suitable_region(sub).is_some()
- && self.find_param_with_region(sub, sup).is_some()
+ && let Some(region_info) = self.tcx().is_suitable_region(sub)
+ && let Some(anon_param_info) = self.find_param_with_region(sub, sup)
{
- (
- sup,
- sub,
- self.find_param_with_region(sub, sup).unwrap(),
- self.tcx().is_suitable_region(sub).unwrap(),
- )
+ (sup, sub, anon_param_info, region_info)
} else {
return None; // inapplicable
};
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
index 0b3bc1ce6..f903f7a49 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
@@ -13,7 +13,7 @@ use rustc_hir::def::Namespace;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::error::ExpectedFound;
use rustc_middle::ty::print::{FmtPrinter, Print, RegionHighlightMode};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, RePlaceholder, Region, TyCtxt};
use std::fmt;
@@ -196,11 +196,11 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
sup_placeholder: Option<Region<'tcx>>,
value_pairs: &ValuePairs<'tcx>,
) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
- let (expected_substs, found_substs, trait_def_id) = match value_pairs {
+ let (expected_args, found_args, trait_def_id) = match value_pairs {
ValuePairs::TraitRefs(ExpectedFound { expected, found })
if expected.def_id == found.def_id =>
{
- (expected.substs, found.substs, expected.def_id)
+ (expected.args, found.args, expected.def_id)
}
ValuePairs::PolyTraitRefs(ExpectedFound { expected, found })
if expected.def_id() == found.def_id() =>
@@ -208,7 +208,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
// It's possible that the placeholders come from a binder
// outside of this value pair. Use `no_bound_vars` as a
// simple heuristic for that.
- (expected.no_bound_vars()?.substs, found.no_bound_vars()?.substs, expected.def_id())
+ (expected.no_bound_vars()?.args, found.no_bound_vars()?.args, expected.def_id())
}
_ => return None,
};
@@ -219,8 +219,8 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
sub_placeholder,
sup_placeholder,
trait_def_id,
- expected_substs,
- found_substs,
+ expected_args,
+ found_args,
))
}
@@ -241,8 +241,8 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
sub_placeholder: Option<Region<'tcx>>,
sup_placeholder: Option<Region<'tcx>>,
trait_def_id: DefId,
- expected_substs: SubstsRef<'tcx>,
- actual_substs: SubstsRef<'tcx>,
+ expected_args: GenericArgsRef<'tcx>,
+ actual_args: GenericArgsRef<'tcx>,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let span = cause.span();
@@ -264,12 +264,12 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
let expected_trait_ref = self.cx.resolve_vars_if_possible(ty::TraitRef::new(
self.cx.tcx,
trait_def_id,
- expected_substs,
+ expected_args,
));
let actual_trait_ref = self.cx.resolve_vars_if_possible(ty::TraitRef::new(
self.cx.tcx,
trait_def_id,
- actual_substs,
+ actual_args,
));
// Search the expected and actual trait references to see (a)
@@ -413,9 +413,9 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
if self_ty.value.is_closure() && self.tcx().is_fn_trait(expected_trait_ref.value.def_id)
{
let closure_sig = self_ty.map(|closure| {
- if let ty::Closure(_, substs) = closure.kind() {
+ if let ty::Closure(_, args) = closure.kind() {
self.tcx().signature_unclosure(
- substs.as_closure().sig(),
+ args.as_closure().sig(),
rustc_hir::Unsafety::Normal,
)
} else {
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
index a9b485a6f..3cfda0cc5 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
@@ -146,7 +146,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
if let SubregionOrigin::Subtype(box TypeTrace { cause, .. }) = sub_origin {
if let ObligationCauseCode::ReturnValue(hir_id)
- | ObligationCauseCode::BlockTailExpression(hir_id) = cause.code()
+ | ObligationCauseCode::BlockTailExpression(hir_id, ..) = cause.code()
{
let parent_id = tcx.hir().get_parent_item(*hir_id);
if let Some(fn_decl) = tcx.hir().fn_decl_by_hir_id(parent_id.into()) {
@@ -235,10 +235,10 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
}
let arg = match param.param.pat.simple_ident() {
- Some(simple_ident) => format!("argument `{}`", simple_ident),
+ Some(simple_ident) => format!("argument `{simple_ident}`"),
None => "the argument".to_string(),
};
- let captures = format!("captures data from {}", arg);
+ let captures = format!("captures data from {arg}");
suggest_new_region_bound(
tcx,
&mut err,
@@ -269,11 +269,11 @@ pub fn suggest_new_region_bound(
// FIXME: account for the need of parens in `&(dyn Trait + '_)`
let consider = "consider changing";
let declare = "to declare that";
- let explicit = format!("you can add an explicit `{}` lifetime bound", lifetime_name);
+ let explicit = format!("you can add an explicit `{lifetime_name}` lifetime bound");
let explicit_static =
- arg.map(|arg| format!("explicit `'static` bound to the lifetime of {}", arg));
+ arg.map(|arg| format!("explicit `'static` bound to the lifetime of {arg}"));
let add_static_bound = "alternatively, add an explicit `'static` bound to this reference";
- let plus_lt = format!(" + {}", lifetime_name);
+ let plus_lt = format!(" + {lifetime_name}");
for fn_return in fn_returns {
if fn_return.span.desugaring_kind().is_some() {
// Skip `async` desugaring `impl Future`.
@@ -288,7 +288,7 @@ pub fn suggest_new_region_bound(
// Get the identity type for this RPIT
let did = item_id.owner_id.to_def_id();
- let ty = Ty::new_opaque(tcx, did, ty::InternalSubsts::identity_for_item(tcx, did));
+ let ty = Ty::new_opaque(tcx, did, ty::GenericArgs::identity_for_item(tcx, did));
if let Some(span) = opaque.bounds.iter().find_map(|arg| match arg {
GenericBound::Outlives(Lifetime {
@@ -333,11 +333,7 @@ pub fn suggest_new_region_bound(
} else {
None
};
- let name = if let Some(name) = &existing_lt_name {
- format!("{}", name)
- } else {
- format!("'a")
- };
+ let name = if let Some(name) = &existing_lt_name { name } else { "'a" };
// if there are more than one elided lifetimes in inputs, the explicit `'_` lifetime cannot be used.
// introducing a new lifetime `'a` or making use of one from existing named lifetimes if any
if let Some(id) = scope_def_id
@@ -350,7 +346,7 @@ pub fn suggest_new_region_bound(
if p.span.hi() - p.span.lo() == rustc_span::BytePos(1) { // Ampersand (elided without '_)
(p.span.shrink_to_hi(),format!("{name} "))
} else { // Underscore (elided with '_)
- (p.span, format!("{name}"))
+ (p.span, name.to_string())
}
)
.collect::<Vec<_>>()
@@ -387,12 +383,7 @@ pub fn suggest_new_region_bound(
if let LifetimeName::ImplicitObjectLifetimeDefault = lt.res {
err.span_suggestion_verbose(
fn_return.span.shrink_to_hi(),
- format!(
- "{declare} the trait object {captures}, {explicit}",
- declare = declare,
- captures = captures,
- explicit = explicit,
- ),
+ format!("{declare} the trait object {captures}, {explicit}",),
&plus_lt,
Applicability::MaybeIncorrect,
);
@@ -404,7 +395,7 @@ pub fn suggest_new_region_bound(
if let Some(explicit_static) = &explicit_static {
err.span_suggestion_verbose(
lt.ident.span,
- format!("{} the trait object's {}", consider, explicit_static),
+ format!("{consider} the trait object's {explicit_static}"),
&lifetime_name,
Applicability::MaybeIncorrect,
);
@@ -493,7 +484,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
tcx,
ctxt.param_env,
ctxt.assoc_item.def_id,
- self.cx.resolve_vars_if_possible(ctxt.substs),
+ self.cx.resolve_vars_if_possible(ctxt.args),
) else {
return false;
};
@@ -503,7 +494,9 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
// Get the `Ident` of the method being called and the corresponding `impl` (to point at
// `Bar` in `impl Foo for dyn Bar {}` and the definition of the method being called).
- let Some((ident, self_ty)) = NiceRegionError::get_impl_ident_and_self_ty_from_trait(tcx, instance.def_id(), &v.0) else {
+ let Some((ident, self_ty)) =
+ NiceRegionError::get_impl_ident_and_self_ty_from_trait(tcx, instance.def_id(), &v.0)
+ else {
return false;
};
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
index c5ef48fe3..be6d1a375 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
@@ -64,8 +64,8 @@ pub fn find_param_with_region<'tcx>(
let body_id = hir.maybe_body_owned_by(def_id)?;
let owner_id = hir.body_owner(body_id);
- let fn_decl = hir.fn_decl_by_hir_id(owner_id).unwrap();
- let poly_fn_sig = tcx.fn_sig(id).subst_identity();
+ let fn_decl = hir.fn_decl_by_hir_id(owner_id)?;
+ let poly_fn_sig = tcx.fn_sig(id).instantiate_identity();
let fn_sig = tcx.liberate_late_bound_regions(id, poly_fn_sig);
let body = hir.body(body_id);
@@ -123,7 +123,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
br: ty::BoundRegionKind,
hir_sig: &hir::FnSig<'_>,
) -> Option<Span> {
- let fn_ty = self.tcx().type_of(scope_def_id).subst_identity();
+ let fn_ty = self.tcx().type_of(scope_def_id).instantiate_identity();
if let ty::FnDef(_, _) = fn_ty.kind() {
let ret_ty = fn_ty.fn_sig(self.tcx()).output();
let span = hir_sig.decl.output.span();
diff --git a/compiler/rustc_infer/src/infer/error_reporting/note.rs b/compiler/rustc_infer/src/infer/error_reporting/note.rs
index e55e9e75f..8d3cd23b7 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/note.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/note.rs
@@ -227,7 +227,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
span,
impl_item_def_id,
trait_item_def_id,
- &format!("`{}: {}`", sup, sub),
+ &format!("`{sup}: {sub}`"),
);
// We should only suggest rewriting the `where` clause if the predicate is within that `where` clause
if let Some(generics) = self.tcx.hir().get_generics(impl_item_def_id)
@@ -243,12 +243,18 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
}
infer::CheckAssociatedTypeBounds { impl_item_def_id, trait_item_def_id, parent } => {
let mut err = self.report_concrete_failure(*parent, sub, sup);
- let trait_item_span = self.tcx.def_span(trait_item_def_id);
- let item_name = self.tcx.item_name(impl_item_def_id.to_def_id());
- err.span_label(
- trait_item_span,
- format!("definition of `{}` from trait", item_name),
- );
+
+ // Don't mention the item name if it's an RPITIT, since that'll just confuse
+ // folks.
+ if !self.tcx.is_impl_trait_in_trait(impl_item_def_id.to_def_id()) {
+ let trait_item_span = self.tcx.def_span(trait_item_def_id);
+ let item_name = self.tcx.item_name(impl_item_def_id.to_def_id());
+ err.span_label(
+ trait_item_span,
+ format!("definition of `{item_name}` from trait"),
+ );
+ }
+
self.suggest_copy_trait_method_bounds(
trait_item_def_id,
impl_item_def_id,
@@ -295,34 +301,40 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
// but right now it's not really very smart when it comes to implicit `Sized`
// predicates and bounds on the trait itself.
- let Some(impl_def_id) =
- self.tcx.associated_item(impl_item_def_id).impl_container(self.tcx) else { return; };
- let Some(trait_ref) = self
- .tcx
- .impl_trait_ref(impl_def_id)
- else { return; };
- let trait_substs = trait_ref
- .subst_identity()
+ let Some(impl_def_id) = self.tcx.associated_item(impl_item_def_id).impl_container(self.tcx)
+ else {
+ return;
+ };
+ let Some(trait_ref) = self.tcx.impl_trait_ref(impl_def_id) else {
+ return;
+ };
+ let trait_args = trait_ref
+ .instantiate_identity()
// Replace the explicit self type with `Self` for better suggestion rendering
.with_self_ty(self.tcx, Ty::new_param(self.tcx, 0, kw::SelfUpper))
- .substs;
- let trait_item_substs = ty::InternalSubsts::identity_for_item(self.tcx, impl_item_def_id)
- .rebase_onto(self.tcx, impl_def_id, trait_substs);
+ .args;
+ let trait_item_args = ty::GenericArgs::identity_for_item(self.tcx, impl_item_def_id)
+ .rebase_onto(self.tcx, impl_def_id, trait_args);
- let Ok(trait_predicates) = self
- .tcx
- .explicit_predicates_of(trait_item_def_id)
- .instantiate_own(self.tcx, trait_item_substs)
- .map(|(pred, _)| {
- if pred.is_suggestable(self.tcx, false) {
- Ok(pred.to_string())
- } else {
- Err(())
- }
- })
- .collect::<Result<Vec<_>, ()>>() else { return; };
+ let Ok(trait_predicates) =
+ self.tcx
+ .explicit_predicates_of(trait_item_def_id)
+ .instantiate_own(self.tcx, trait_item_args)
+ .map(|(pred, _)| {
+ if pred.is_suggestable(self.tcx, false) {
+ Ok(pred.to_string())
+ } else {
+ Err(())
+ }
+ })
+ .collect::<Result<Vec<_>, ()>>()
+ else {
+ return;
+ };
- let Some(generics) = self.tcx.hir().get_generics(impl_item_def_id) else { return; };
+ let Some(generics) = self.tcx.hir().get_generics(impl_item_def_id) else {
+ return;
+ };
let suggestion = if trait_predicates.is_empty() {
WhereClauseSuggestions::Remove { span: generics.where_clause_span }
diff --git a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs
index 63613b590..372539d73 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs
@@ -47,7 +47,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
diag.span_suggestion(
sp,
"use a float literal",
- format!("{}.0", snippet),
+ format!("{snippet}.0"),
MachineApplicable,
);
}
@@ -100,9 +100,9 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
{
// Synthesize the associated type restriction `Add<Output = Expected>`.
// FIXME: extract this logic for use in other diagnostics.
- let (trait_ref, assoc_substs) = proj.trait_ref_and_own_substs(tcx);
+ let (trait_ref, assoc_args) = proj.trait_ref_and_own_args(tcx);
let item_name = tcx.item_name(proj.def_id);
- let item_args = self.format_generic_args(assoc_substs);
+ let item_args = self.format_generic_args(assoc_args);
// Here, we try to see if there's an existing
// trait implementation that matches the one that
@@ -134,7 +134,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
if matched_end_of_args {
// Append suggestion to the end of our args
- let path = format!(", {}{} = {}",item_name, item_args, p);
+ let path = format!(", {item_name}{item_args} = {p}");
note = !suggest_constraining_type_param(
tcx,
generics,
@@ -148,7 +148,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
// Suggest adding a bound to an existing trait
// or if the trait doesn't exist, add the trait
// and the suggested bounds.
- let path = format!("<{}{} = {}>", item_name, item_args, p);
+ let path = format!("<{item_name}{item_args} = {p}>");
note = !suggest_constraining_type_param(
tcx,
generics,
@@ -213,8 +213,7 @@ impl<T> Trait<T> for X {
}
diag.help(format!(
"every closure has a distinct type and so could not always match the \
- caller-chosen type of parameter `{}`",
- p
+ caller-chosen type of parameter `{p}`"
));
}
(ty::Param(p), _) | (_, ty::Param(p)) => {
@@ -316,7 +315,7 @@ impl<T> Trait<T> for X {
) -> bool {
let tcx = self.tcx;
let assoc = tcx.associated_item(proj_ty.def_id);
- let (trait_ref, assoc_substs) = proj_ty.trait_ref_and_own_substs(tcx);
+ let (trait_ref, assoc_args) = proj_ty.trait_ref_and_own_args(tcx);
if let Some(item) = tcx.hir().get_if_local(body_owner_def_id) {
if let Some(hir_generics) = item.generics() {
// Get the `DefId` for the type parameter corresponding to `A` in `<A as T>::Foo`.
@@ -339,7 +338,7 @@ impl<T> Trait<T> for X {
&trait_ref,
pred.bounds,
assoc,
- assoc_substs,
+ assoc_args,
ty,
&msg,
false,
@@ -488,14 +487,14 @@ fn foo(&self) -> Self::T { String::new() }
return false;
};
- let (trait_ref, assoc_substs) = proj_ty.trait_ref_and_own_substs(tcx);
+ let (trait_ref, assoc_args) = proj_ty.trait_ref_and_own_args(tcx);
self.constrain_generic_bound_associated_type_structured_suggestion(
diag,
&trait_ref,
opaque_hir_ty.bounds,
assoc,
- assoc_substs,
+ assoc_args,
ty,
msg,
true,
@@ -527,7 +526,7 @@ fn foo(&self) -> Self::T { String::new() }
&& !tcx.is_doc_hidden(item.def_id)
})
.filter_map(|item| {
- let method = tcx.fn_sig(item.def_id).subst_identity();
+ let method = tcx.fn_sig(item.def_id).instantiate_identity();
match *method.output().skip_binder().kind() {
ty::Alias(ty::Projection, ty::AliasTy { def_id: item_def_id, .. })
if item_def_id == proj_ty_item_def_id =>
@@ -597,7 +596,7 @@ fn foo(&self) -> Self::T { String::new() }
if let hir::Defaultness::Default { has_value: true } =
tcx.defaultness(item.id.owner_id)
{
- let assoc_ty = tcx.type_of(item.id.owner_id).subst_identity();
+ let assoc_ty = tcx.type_of(item.id.owner_id).instantiate_identity();
if self.infcx.can_eq(param_env, assoc_ty, found) {
diag.span_label(
item.span,
@@ -618,7 +617,7 @@ fn foo(&self) -> Self::T { String::new() }
})) => {
for item in &items[..] {
if let hir::AssocItemKind::Type = item.kind {
- let assoc_ty = tcx.type_of(item.id.owner_id).subst_identity();
+ let assoc_ty = tcx.type_of(item.id.owner_id).instantiate_identity();
if self.infcx.can_eq(param_env, assoc_ty, found) {
diag.span_label(item.span, "expected this associated type");
@@ -645,7 +644,7 @@ fn foo(&self) -> Self::T { String::new() }
trait_ref: &ty::TraitRef<'tcx>,
bounds: hir::GenericBounds<'_>,
assoc: ty::AssocItem,
- assoc_substs: &[ty::GenericArg<'tcx>],
+ assoc_args: &[ty::GenericArg<'tcx>],
ty: Ty<'tcx>,
msg: impl Fn() -> String,
is_bound_surely_present: bool,
@@ -671,14 +670,7 @@ fn foo(&self) -> Self::T { String::new() }
_ => return false,
};
- self.constrain_associated_type_structured_suggestion(
- diag,
- span,
- assoc,
- assoc_substs,
- ty,
- msg,
- )
+ self.constrain_associated_type_structured_suggestion(diag, span, assoc, assoc_args, ty, msg)
}
/// Given a span corresponding to a bound, provide a structured suggestion to set an
@@ -688,7 +680,7 @@ fn foo(&self) -> Self::T { String::new() }
diag: &mut Diagnostic,
span: Span,
assoc: ty::AssocItem,
- assoc_substs: &[ty::GenericArg<'tcx>],
+ assoc_args: &[ty::GenericArg<'tcx>],
ty: Ty<'tcx>,
msg: impl Fn() -> String,
) -> bool {
@@ -702,7 +694,7 @@ fn foo(&self) -> Self::T { String::new() }
let span = Span::new(pos, pos, span.ctxt(), span.parent());
(span, format!(", {} = {}", assoc.ident(tcx), ty))
} else {
- let item_args = self.format_generic_args(assoc_substs);
+ let item_args = self.format_generic_args(assoc_args);
(span.shrink_to_hi(), format!("<{}{} = {}>", assoc.ident(tcx), item_args, ty))
};
diag.span_suggestion_verbose(span, msg(), sugg, MaybeIncorrect);
diff --git a/compiler/rustc_infer/src/infer/error_reporting/suggest.rs b/compiler/rustc_infer/src/infer/error_reporting/suggest.rs
index 1422bdc9e..f1d53cb59 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/suggest.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/suggest.rs
@@ -105,7 +105,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
// Heavily inspired by `FnCtxt::suggest_compatible_variants`, with
// some modifications due to that being in typeck and this being in infer.
if let ObligationCauseCode::Pattern { .. } = cause.code() {
- if let ty::Adt(expected_adt, substs) = exp_found.expected.kind() {
+ if let ty::Adt(expected_adt, args) = exp_found.expected.kind() {
let compatible_variants: Vec<_> = expected_adt
.variants()
.iter()
@@ -114,7 +114,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
})
.filter_map(|variant| {
let sole_field = &variant.single_field();
- let sole_field_ty = sole_field.ty(self.tcx, substs);
+ let sole_field_ty = sole_field.ty(self.tcx, args);
if self.same_type_modulo_infer(sole_field_ty, exp_found.found) {
let variant_path =
with_no_trimmed_paths!(self.tcx.def_path_str(variant.def_id));
@@ -260,7 +260,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
"suggest_accessing_field_where_appropriate(cause={:?}, exp_found={:?})",
cause, exp_found
);
- if let ty::Adt(expected_def, expected_substs) = exp_found.expected.kind() {
+ if let ty::Adt(expected_def, expected_args) = exp_found.expected.kind() {
if expected_def.is_enum() {
return;
}
@@ -270,7 +270,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
.fields
.iter()
.filter(|field| field.vis.is_accessible_from(field.did, self.tcx))
- .map(|field| (field.name, field.ty(self.tcx, expected_substs)))
+ .map(|field| (field.name, field.ty(self.tcx, expected_args)))
.find(|(_, ty)| self.same_type_modulo_infer(*ty, exp_found.found))
{
if let ObligationCauseCode::Pattern { span: Some(span), .. } = *cause.code() {
@@ -304,12 +304,12 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
return;
}
match (&expected_inner.kind(), &found_inner.kind()) {
- (ty::FnPtr(sig), ty::FnDef(did, substs)) => {
+ (ty::FnPtr(sig), ty::FnDef(did, args)) => {
let expected_sig = &(self.normalize_fn_sig)(*sig);
let found_sig =
- &(self.normalize_fn_sig)(self.tcx.fn_sig(*did).subst(self.tcx, substs));
+ &(self.normalize_fn_sig)(self.tcx.fn_sig(*did).instantiate(self.tcx, args));
- let fn_name = self.tcx.def_path_str_with_substs(*did, substs);
+ let fn_name = self.tcx.def_path_str_with_args(*did, args);
if !self.same_type_modulo_infer(*found_sig, *expected_sig)
|| !sig.is_suggestable(self.tcx, true)
@@ -332,11 +332,11 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
};
diag.subdiagnostic(sugg);
}
- (ty::FnDef(did1, substs1), ty::FnDef(did2, substs2)) => {
+ (ty::FnDef(did1, args1), ty::FnDef(did2, args2)) => {
let expected_sig =
- &(self.normalize_fn_sig)(self.tcx.fn_sig(*did1).subst(self.tcx, substs1));
+ &(self.normalize_fn_sig)(self.tcx.fn_sig(*did1).instantiate(self.tcx, args1));
let found_sig =
- &(self.normalize_fn_sig)(self.tcx.fn_sig(*did2).subst(self.tcx, substs2));
+ &(self.normalize_fn_sig)(self.tcx.fn_sig(*did2).instantiate(self.tcx, args2));
if self.same_type_modulo_infer(*expected_sig, *found_sig) {
diag.subdiagnostic(FnUniqTypes);
@@ -351,7 +351,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
return;
}
- let fn_name = self.tcx.def_path_str_with_substs(*did2, substs2);
+ let fn_name = self.tcx.def_path_str_with_args(*did2, args2);
let sug = if found.is_ref() {
FunctionPointerSuggestion::CastBothRef {
span,
@@ -370,16 +370,16 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
diag.subdiagnostic(sug);
}
- (ty::FnDef(did, substs), ty::FnPtr(sig)) => {
+ (ty::FnDef(did, args), ty::FnPtr(sig)) => {
let expected_sig =
- &(self.normalize_fn_sig)(self.tcx.fn_sig(*did).subst(self.tcx, substs));
+ &(self.normalize_fn_sig)(self.tcx.fn_sig(*did).instantiate(self.tcx, args));
let found_sig = &(self.normalize_fn_sig)(*sig);
if !self.same_type_modulo_infer(*found_sig, *expected_sig) {
return;
}
- let fn_name = self.tcx.def_path_str_with_substs(*did, substs);
+ let fn_name = self.tcx.def_path_str_with_args(*did, args);
let casting = if expected.is_ref() {
format!("&({fn_name} as {found_sig})")
@@ -400,10 +400,10 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
expected: Ty<'tcx>,
found: Ty<'tcx>,
) -> Option<SuggestAsRefKind> {
- if let (ty::Adt(exp_def, exp_substs), ty::Ref(_, found_ty, _)) =
+ if let (ty::Adt(exp_def, exp_args), ty::Ref(_, found_ty, _)) =
(expected.kind(), found.kind())
{
- if let ty::Adt(found_def, found_substs) = *found_ty.kind() {
+ if let ty::Adt(found_def, found_args) = *found_ty.kind() {
if exp_def == &found_def {
let have_as_ref = &[
(sym::Option, SuggestAsRefKind::Option),
@@ -414,7 +414,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
}) {
let mut show_suggestion = true;
for (exp_ty, found_ty) in
- std::iter::zip(exp_substs.types(), found_substs.types())
+ std::iter::zip(exp_args.types(), found_args.types())
{
match *exp_ty.kind() {
ty::Ref(_, exp_ty, _) => {
@@ -464,52 +464,53 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
span: Span,
) -> Option<TypeErrorAdditionalDiags> {
let hir = self.tcx.hir();
- if let Some(node) = self.tcx.hir().find_by_def_id(cause.body_id) &&
- let hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Fn(_sig, _, body_id), ..
- }) = node {
- let body = hir.body(*body_id);
-
- /// Find the if expression with given span
- struct IfVisitor {
- pub result: bool,
- pub found_if: bool,
- pub err_span: Span,
- }
+ if let Some(body_id) = self.tcx.hir().maybe_body_owned_by(cause.body_id) {
+ let body = hir.body(body_id);
+
+ /// Find the if expression with given span
+ struct IfVisitor {
+ pub result: bool,
+ pub found_if: bool,
+ pub err_span: Span,
+ }
- impl<'v> Visitor<'v> for IfVisitor {
- fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) {
- if self.result { return; }
- match ex.kind {
- hir::ExprKind::If(cond, _, _) => {
- self.found_if = true;
- walk_expr(self, cond);
- self.found_if = false;
+ impl<'v> Visitor<'v> for IfVisitor {
+ fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) {
+ if self.result {
+ return;
+ }
+ match ex.kind {
+ hir::ExprKind::If(cond, _, _) => {
+ self.found_if = true;
+ walk_expr(self, cond);
+ self.found_if = false;
+ }
+ _ => walk_expr(self, ex),
}
- _ => walk_expr(self, ex),
}
- }
- fn visit_stmt(&mut self, ex: &'v hir::Stmt<'v>) {
- if let hir::StmtKind::Local(hir::Local {
- span, pat: hir::Pat{..}, ty: None, init: Some(_), ..
- }) = &ex.kind
- && self.found_if
- && span.eq(&self.err_span) {
- self.result = true;
+ fn visit_stmt(&mut self, ex: &'v hir::Stmt<'v>) {
+ if let hir::StmtKind::Local(hir::Local {
+ span, pat: hir::Pat{..}, ty: None, init: Some(_), ..
+ }) = &ex.kind
+ && self.found_if
+ && span.eq(&self.err_span) {
+ self.result = true;
+ }
+ walk_stmt(self, ex);
}
- walk_stmt(self, ex);
- }
- fn visit_body(&mut self, body: &'v hir::Body<'v>) {
- hir::intravisit::walk_body(self, body);
+ fn visit_body(&mut self, body: &'v hir::Body<'v>) {
+ hir::intravisit::walk_body(self, body);
+ }
}
- }
- let mut visitor = IfVisitor { err_span: span, found_if: false, result: false };
- visitor.visit_body(&body);
- if visitor.result {
- return Some(TypeErrorAdditionalDiags::AddLetForLetChains{span: span.shrink_to_lo()});
+ let mut visitor = IfVisitor { err_span: span, found_if: false, result: false };
+ visitor.visit_body(&body);
+ if visitor.result {
+ return Some(TypeErrorAdditionalDiags::AddLetForLetChains {
+ span: span.shrink_to_lo(),
+ });
}
}
None
@@ -525,13 +526,23 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
diag: &mut Diagnostic,
) {
// 0. Extract fn_decl from hir
- let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Closure(hir::Closure { body, fn_decl, .. }), .. }) = hir else { return; };
+ let hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(hir::Closure { body, fn_decl, .. }),
+ ..
+ }) = hir
+ else {
+ return;
+ };
let hir::Body { params, .. } = self.tcx.hir().body(*body);
- // 1. Get the substs of the closure.
+ // 1. Get the args of the closure.
// 2. Assume exp_found is FnOnce / FnMut / Fn, we can extract function parameters from [1].
- let Some(expected) = exp_found.expected.skip_binder().substs.get(1) else { return; };
- let Some(found) = exp_found.found.skip_binder().substs.get(1) else { return; };
+ let Some(expected) = exp_found.expected.skip_binder().args.get(1) else {
+ return;
+ };
+ let Some(found) = exp_found.found.skip_binder().args.get(1) else {
+ return;
+ };
let expected = expected.unpack();
let found = found.unpack();
// 3. Extract the tuple type from Fn trait and suggest the change.
@@ -564,12 +575,12 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
if param_hir.pat.span == param_hir.ty_span {
// for `|x|`, `|_|`, `|x: impl Foo|`
let Ok(pat) = self.tcx.sess.source_map().span_to_snippet(param_hir.pat.span) else { return; };
- suggestion += &format!("{}: &_", pat);
+ suggestion += &format!("{pat}: &_");
} else {
// for `|x: ty|`, `|_: ty|`
let Ok(pat) = self.tcx.sess.source_map().span_to_snippet(param_hir.pat.span) else { return; };
let Ok(ty) = self.tcx.sess.source_map().span_to_snippet(param_hir.ty_span) else { return; };
- suggestion += &format!("{}: &{}", pat, ty);
+ suggestion += &format!("{pat}: &{ty}");
}
has_suggestion = true;
} else {
@@ -620,8 +631,8 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
ty::Alias(ty::Opaque, ty::AliasTy { def_id: exp_def_id, .. }),
) if last_def_id == exp_def_id => StatementAsExpression::CorrectType,
(
- ty::Alias(ty::Opaque, ty::AliasTy { def_id: last_def_id, substs: last_bounds, .. }),
- ty::Alias(ty::Opaque, ty::AliasTy { def_id: exp_def_id, substs: exp_bounds, .. }),
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id: last_def_id, args: last_bounds, .. }),
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id: exp_def_id, args: exp_bounds, .. }),
) => {
debug!(
"both opaque, likely future {:?} {:?} {:?} {:?}",
@@ -710,7 +721,9 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
let hir = self.tcx.hir();
for stmt in blk.stmts.iter().rev() {
- let hir::StmtKind::Local(local) = &stmt.kind else { continue; };
+ let hir::StmtKind::Local(local) = &stmt.kind else {
+ continue;
+ };
local.pat.walk(&mut find_compatible_candidates);
}
match hir.find_parent(blk.hir_id) {
diff --git a/compiler/rustc_infer/src/infer/freshen.rs b/compiler/rustc_infer/src/infer/freshen.rs
index 05769b790..689945d64 100644
--- a/compiler/rustc_infer/src/infer/freshen.rs
+++ b/compiler/rustc_infer/src/infer/freshen.rs
@@ -11,7 +11,7 @@
//!
//! To handle closures, freshened types also have to contain the signature and kind of any
//! closure in the local inference context, as otherwise the cache key might be invalidated.
-//! The way this is done is somewhat hacky - the closure signature is appended to the substs,
+//! The way this is done is somewhat hacky - the closure signature is appended to the args,
//! as well as the closure kind "encoded" as a type. Also, special handling is needed when
//! the closure signature contains a reference to the original closure.
//!
diff --git a/compiler/rustc_infer/src/infer/generalize.rs b/compiler/rustc_infer/src/infer/generalize.rs
index 780250167..cf674d5dd 100644
--- a/compiler/rustc_infer/src/infer/generalize.rs
+++ b/compiler/rustc_infer/src/infer/generalize.rs
@@ -173,21 +173,21 @@ where
true
}
- fn relate_item_substs(
+ fn relate_item_args(
&mut self,
item_def_id: DefId,
- a_subst: ty::SubstsRef<'tcx>,
- b_subst: ty::SubstsRef<'tcx>,
- ) -> RelateResult<'tcx, ty::SubstsRef<'tcx>> {
+ a_subst: ty::GenericArgsRef<'tcx>,
+ b_subst: ty::GenericArgsRef<'tcx>,
+ ) -> RelateResult<'tcx, ty::GenericArgsRef<'tcx>> {
if self.ambient_variance == ty::Variance::Invariant {
// Avoid fetching the variance if we are in an invariant
// context; no need, and it can induce dependency cycles
// (e.g., #41849).
- relate::relate_substs(self, a_subst, b_subst)
+ relate::relate_args(self, a_subst, b_subst)
} else {
let tcx = self.tcx();
let opt_variances = tcx.variances_of(item_def_id);
- relate::relate_substs_with_variances(
+ relate::relate_args_with_variances(
self,
item_def_id,
opt_variances,
@@ -405,16 +405,16 @@ where
}
// FIXME: remove this branch once `structurally_relate_consts` is fully
// structural.
- ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs }) => {
- let substs = self.relate_with_variance(
+ ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, args }) => {
+ let args = self.relate_with_variance(
ty::Variance::Invariant,
ty::VarianceDiagInfo::default(),
- substs,
- substs,
+ args,
+ args,
)?;
Ok(ty::Const::new_unevaluated(
self.tcx(),
- ty::UnevaluatedConst { def, substs },
+ ty::UnevaluatedConst { def, args },
c.ty(),
))
}
diff --git a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
index 485e34fe2..60d9d6578 100644
--- a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
+++ b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
@@ -837,9 +837,8 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
self.var_infos[node_idx].origin.span(),
format!(
"collect_error_for_expanding_node() could not find \
- error for var {:?} in universe {:?}, lower_bounds={:#?}, \
- upper_bounds={:#?}",
- node_idx, node_universe, lower_bounds, upper_bounds
+ error for var {node_idx:?} in universe {node_universe:?}, lower_bounds={lower_bounds:#?}, \
+ upper_bounds={upper_bounds:#?}"
),
);
}
@@ -943,6 +942,10 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
generic_ty: Ty<'tcx>,
min: ty::Region<'tcx>,
) -> bool {
+ if let ty::ReError(_) = *min {
+ return true;
+ }
+
match bound {
VerifyBound::IfEq(verify_if_eq_b) => {
let verify_if_eq_b = var_values.normalize(self.region_rels.tcx, *verify_if_eq_b);
diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs
index fca32b73d..aaabf1482 100644
--- a/compiler/rustc_infer/src/infer/mod.rs
+++ b/compiler/rustc_infer/src/infer/mod.rs
@@ -30,11 +30,11 @@ use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::fold::BoundVarReplacerDelegate;
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
use rustc_middle::ty::relate::RelateResult;
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, SubstsRef};
use rustc_middle::ty::visit::{TypeVisitable, TypeVisitableExt};
pub use rustc_middle::ty::IntVarValue;
use rustc_middle::ty::{self, GenericParamDefKind, InferConst, InferTy, Ty, TyCtxt};
use rustc_middle::ty::{ConstVid, FloatVid, IntVid, TyVid};
+use rustc_middle::ty::{GenericArg, GenericArgKind, GenericArgs, GenericArgsRef};
use rustc_span::symbol::Symbol;
use rustc_span::Span;
@@ -332,6 +332,39 @@ pub struct InferCtxt<'tcx> {
next_trait_solver: bool,
}
+impl<'tcx> ty::InferCtxtLike<TyCtxt<'tcx>> for InferCtxt<'tcx> {
+ fn universe_of_ty(&self, ty: ty::InferTy) -> Option<ty::UniverseIndex> {
+ use InferTy::*;
+ match ty {
+ // FIXME(BoxyUwU): this is kind of jank and means that printing unresolved
+ // ty infers will give you the universe of the var it resolved to not the universe
+ // it actually had. It also means that if you have a `?0.1` and infer it to `u8` then
+ // try to print out `?0.1` it will just print `?0`.
+ TyVar(ty_vid) => match self.probe_ty_var(ty_vid) {
+ Err(universe) => Some(universe),
+ Ok(_) => None,
+ },
+ IntVar(_) | FloatVar(_) | FreshTy(_) | FreshIntTy(_) | FreshFloatTy(_) => None,
+ }
+ }
+
+ fn universe_of_ct(&self, ct: ty::InferConst<'tcx>) -> Option<ty::UniverseIndex> {
+ use ty::InferConst::*;
+ match ct {
+ // Same issue as with `universe_of_ty`
+ Var(ct_vid) => match self.probe_const_var(ct_vid) {
+ Err(universe) => Some(universe),
+ Ok(_) => None,
+ },
+ Fresh(_) => None,
+ }
+ }
+
+ fn universe_of_lt(&self, lt: ty::RegionVid) -> Option<ty::UniverseIndex> {
+ Some(self.universe_of_region_vid(lt))
+ }
+}
+
/// See the `error_reporting` module for more details.
#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable)]
pub enum ValuePairs<'tcx> {
@@ -341,6 +374,8 @@ pub enum ValuePairs<'tcx> {
TraitRefs(ExpectedFound<ty::TraitRef<'tcx>>),
PolyTraitRefs(ExpectedFound<ty::PolyTraitRef<'tcx>>),
Sigs(ExpectedFound<ty::FnSig<'tcx>>),
+ ExistentialTraitRef(ExpectedFound<ty::PolyExistentialTraitRef<'tcx>>),
+ ExistentialProjection(ExpectedFound<ty::PolyExistentialProjection<'tcx>>),
}
impl<'tcx> ValuePairs<'tcx> {
@@ -1068,6 +1103,11 @@ impl<'tcx> InferCtxt<'tcx> {
self.inner.borrow_mut().unwrap_region_constraints().universe(r)
}
+ /// Returns the universe that the region variable `vid` was created in.
+ pub fn universe_of_region_vid(&self, vid: ty::RegionVid) -> ty::UniverseIndex {
+ self.inner.borrow_mut().unwrap_region_constraints().var_universe(vid)
+ }
+
/// Number of region variables created so far.
pub fn num_region_vars(&self) -> usize {
self.inner.borrow_mut().unwrap_region_constraints().num_region_vars()
@@ -1146,8 +1186,8 @@ impl<'tcx> InferCtxt<'tcx> {
/// Given a set of generics defined on a type or impl, returns a substitution mapping each
/// type/region parameter to a fresh inference variable.
- pub fn fresh_substs_for_item(&self, span: Span, def_id: DefId) -> SubstsRef<'tcx> {
- InternalSubsts::for_item(self.tcx, def_id, |param, _| self.var_for_def(span, param))
+ pub fn fresh_args_for_item(&self, span: Span, def_id: DefId) -> GenericArgsRef<'tcx> {
+ GenericArgs::for_item(self.tcx, def_id, |param, _| self.var_for_def(span, param))
}
/// Returns `true` if errors have been reported since this infcx was
@@ -1436,8 +1476,8 @@ impl<'tcx> InferCtxt<'tcx> {
/// Obtains the latest type of the given closure; this may be a
/// closure in the current function, in which case its
/// `ClosureKind` may not yet be known.
- pub fn closure_kind(&self, closure_substs: SubstsRef<'tcx>) -> Option<ty::ClosureKind> {
- let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ pub fn closure_kind(&self, closure_args: GenericArgsRef<'tcx>) -> Option<ty::ClosureKind> {
+ let closure_kind_ty = closure_args.as_closure().kind_ty();
let closure_kind_ty = self.shallow_resolve(closure_kind_ty);
closure_kind_ty.to_opt_closure_kind()
}
@@ -1496,7 +1536,7 @@ impl<'tcx> InferCtxt<'tcx> {
/// too generic for the constant to be evaluated then `Err(ErrorHandled::TooGeneric)` is
/// returned.
///
- /// This handles inferences variables within both `param_env` and `substs` by
+ /// This handles inference variables within both `param_env` and `args` by
/// performing the operation on their respective canonical forms.
#[instrument(skip(self), level = "debug")]
pub fn const_eval_resolve(
@@ -1505,34 +1545,34 @@ impl<'tcx> InferCtxt<'tcx> {
unevaluated: ty::UnevaluatedConst<'tcx>,
span: Option<Span>,
) -> EvalToValTreeResult<'tcx> {
- let mut substs = self.resolve_vars_if_possible(unevaluated.substs);
- debug!(?substs);
+ let mut args = self.resolve_vars_if_possible(unevaluated.args);
+ debug!(?args);
- // Postpone the evaluation of constants whose substs depend on inference
+ // Postpone the evaluation of constants whose args depend on inference
// variables
let tcx = self.tcx;
- if substs.has_non_region_infer() {
+ if args.has_non_region_infer() {
if let Some(ct) = tcx.thir_abstract_const(unevaluated.def)? {
- let ct = tcx.expand_abstract_consts(ct.subst(tcx, substs));
+ let ct = tcx.expand_abstract_consts(ct.instantiate(tcx, args));
if let Err(e) = ct.error_reported() {
return Err(ErrorHandled::Reported(e.into()));
} else if ct.has_non_region_infer() || ct.has_non_region_param() {
return Err(ErrorHandled::TooGeneric);
} else {
- substs = replace_param_and_infer_substs_with_placeholder(tcx, substs);
+ args = replace_param_and_infer_args_with_placeholder(tcx, args);
}
} else {
- substs = InternalSubsts::identity_for_item(tcx, unevaluated.def);
+ args = GenericArgs::identity_for_item(tcx, unevaluated.def);
param_env = tcx.param_env(unevaluated.def);
}
}
let param_env_erased = tcx.erase_regions(param_env);
- let substs_erased = tcx.erase_regions(substs);
+ let args_erased = tcx.erase_regions(args);
debug!(?param_env_erased);
- debug!(?substs_erased);
+ debug!(?args_erased);
- let unevaluated = ty::UnevaluatedConst { def: unevaluated.def, substs: substs_erased };
+ let unevaluated = ty::UnevaluatedConst { def: unevaluated.def, args: args_erased };
// The return value is the evaluated value which doesn't contain any reference to inference
// variables, thus we don't need to substitute back the original values.
@@ -1921,13 +1961,13 @@ impl RegionVariableOrigin {
}
}
-/// Replaces substs that reference param or infer variables with suitable
+/// Replaces args that reference param or infer variables with suitable
/// placeholders. This function is meant to remove these param and infer
-/// substs when they're not actually needed to evaluate a constant.
-fn replace_param_and_infer_substs_with_placeholder<'tcx>(
+/// args when they're not actually needed to evaluate a constant.
+fn replace_param_and_infer_args_with_placeholder<'tcx>(
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
-) -> SubstsRef<'tcx> {
+ args: GenericArgsRef<'tcx>,
+) -> GenericArgsRef<'tcx> {
struct ReplaceParamAndInferWithPlaceholder<'tcx> {
tcx: TyCtxt<'tcx>,
idx: u32,
@@ -1985,5 +2025,5 @@ fn replace_param_and_infer_substs_with_placeholder<'tcx>(
}
}
- substs.fold_with(&mut ReplaceParamAndInferWithPlaceholder { tcx, idx: 0 })
+ args.fold_with(&mut ReplaceParamAndInferWithPlaceholder { tcx, idx: 0 })
}
diff --git a/compiler/rustc_infer/src/infer/nll_relate/mod.rs b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
index 71c07f31b..c80491643 100644
--- a/compiler/rustc_infer/src/infer/nll_relate/mod.rs
+++ b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
@@ -557,7 +557,7 @@ where
// Forbid inference variables in the RHS.
self.infcx.tcx.sess.delay_span_bug(
self.delegate.span(),
- format!("unexpected inference var {:?}", b,),
+ format!("unexpected inference var {b:?}"),
);
Ok(a)
}
diff --git a/compiler/rustc_infer/src/infer/opaque_types.rs b/compiler/rustc_infer/src/infer/opaque_types.rs
index 5927f79a1..1c3a5c360 100644
--- a/compiler/rustc_infer/src/infer/opaque_types.rs
+++ b/compiler/rustc_infer/src/infer/opaque_types.rs
@@ -64,7 +64,7 @@ impl<'tcx> InferCtxt<'tcx> {
ct_op: |ct| ct,
ty_op: |ty| match *ty.kind() {
ty::Alias(ty::Opaque, ty::AliasTy { def_id, .. })
- if replace_opaque_type(def_id) =>
+ if replace_opaque_type(def_id) && !ty.has_escaping_bound_vars() =>
{
let def_span = self.tcx.def_span(def_id);
let span = if span.contains(def_span) { def_span } else { span };
@@ -103,7 +103,7 @@ impl<'tcx> InferCtxt<'tcx> {
}
let (a, b) = if a_is_expected { (a, b) } else { (b, a) };
let process = |a: Ty<'tcx>, b: Ty<'tcx>, a_is_expected| match *a.kind() {
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) if def_id.is_local() => {
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) if def_id.is_local() => {
let def_id = def_id.expect_local();
match self.defining_use_anchor {
DefiningAnchor::Bind(_) => {
@@ -165,7 +165,7 @@ impl<'tcx> InferCtxt<'tcx> {
}
}
Some(self.register_hidden_type(
- OpaqueTypeKey { def_id, substs },
+ OpaqueTypeKey { def_id, args },
cause.clone(),
param_env,
b,
@@ -214,12 +214,12 @@ impl<'tcx> InferCtxt<'tcx> {
/// fn foo<'a, 'b>(..) -> (Foo1<'a>, Foo2<'b>) { .. }
/// // ^^^^ ^^
/// // | |
- /// // | substs
+ /// // | args
/// // def_id
/// ```
///
/// As indicating in the comments above, each of those references
- /// is (in the compiler) basically a substitution (`substs`)
+ /// is (in the compiler) basically a substitution (`args`)
/// applied to the type of a suitable `def_id` (which identifies
/// `Foo1` or `Foo2`).
///
@@ -278,7 +278,7 @@ impl<'tcx> InferCtxt<'tcx> {
///
/// We generally prefer to make `<=` constraints, since they
/// integrate best into the region solver. To do that, we find the
- /// "minimum" of all the arguments that appear in the substs: that
+ /// "minimum" of all the arguments that appear in the args: that
/// is, some region which is less than all the others. In the case
/// of `Foo1<'a>`, that would be `'a` (it's the only choice, after
/// all). Then we apply that as a least bound to the variables
@@ -350,7 +350,7 @@ impl<'tcx> InferCtxt<'tcx> {
// opaque type definition.
let choice_regions: Lrc<Vec<ty::Region<'tcx>>> = Lrc::new(
opaque_type_key
- .substs
+ .args
.iter()
.enumerate()
.filter(|(i, _)| variances[*i] == ty::Variance::Invariant)
@@ -445,28 +445,32 @@ where
}
match ty.kind() {
- ty::Closure(_, ref substs) => {
+ ty::Closure(_, ref args) => {
// Skip lifetime parameters of the enclosing item(s)
- substs.as_closure().tupled_upvars_ty().visit_with(self);
- substs.as_closure().sig_as_fn_ptr_ty().visit_with(self);
+ for upvar in args.as_closure().upvar_tys() {
+ upvar.visit_with(self);
+ }
+ args.as_closure().sig_as_fn_ptr_ty().visit_with(self);
}
- ty::Generator(_, ref substs, _) => {
+ ty::Generator(_, ref args, _) => {
// Skip lifetime parameters of the enclosing item(s)
// Also skip the witness type, because that has no free regions.
- substs.as_generator().tupled_upvars_ty().visit_with(self);
- substs.as_generator().return_ty().visit_with(self);
- substs.as_generator().yield_ty().visit_with(self);
- substs.as_generator().resume_ty().visit_with(self);
+ for upvar in args.as_generator().upvar_tys() {
+ upvar.visit_with(self);
+ }
+ args.as_generator().return_ty().visit_with(self);
+ args.as_generator().yield_ty().visit_with(self);
+ args.as_generator().resume_ty().visit_with(self);
}
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, ref substs, .. }) => {
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, ref args, .. }) => {
// Skip lifetime parameters that are not captures.
let variances = self.tcx.variances_of(*def_id);
- for (v, s) in std::iter::zip(variances, substs.iter()) {
+ for (v, s) in std::iter::zip(variances, args.iter()) {
if *v != ty::Variance::Bivariant {
s.visit_with(self);
}
@@ -519,7 +523,7 @@ impl<'tcx> InferCtxt<'tcx> {
self.add_item_bounds_for_hidden_type(
opaque_type_key.def_id.to_def_id(),
- opaque_type_key.substs,
+ opaque_type_key.args,
cause,
param_env,
hidden_ty,
@@ -582,7 +586,7 @@ impl<'tcx> InferCtxt<'tcx> {
pub fn add_item_bounds_for_hidden_type(
&self,
def_id: DefId,
- substs: ty::SubstsRef<'tcx>,
+ args: ty::GenericArgsRef<'tcx>,
cause: ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
hidden_ty: Ty<'tcx>,
@@ -591,7 +595,7 @@ impl<'tcx> InferCtxt<'tcx> {
let tcx = self.tcx;
let item_bounds = tcx.explicit_item_bounds(def_id);
- for (predicate, _) in item_bounds.subst_iter_copied(tcx, substs) {
+ for (predicate, _) in item_bounds.iter_instantiated_copied(tcx, args) {
let predicate = predicate.fold_with(&mut BottomUpFolder {
tcx,
ty_op: |ty| match *ty.kind() {
@@ -614,17 +618,11 @@ impl<'tcx> InferCtxt<'tcx> {
}
// Replace all other mentions of the same opaque type with the hidden type,
// as the bounds must hold on the hidden type after all.
- ty::Alias(ty::Opaque, ty::AliasTy { def_id: def_id2, substs: substs2, .. })
- if def_id == def_id2 && substs == substs2 =>
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id: def_id2, args: args2, .. })
+ if def_id == def_id2 && args == args2 =>
{
hidden_ty
}
- // FIXME(RPITIT): This can go away when we move to associated types
- // FIXME(inherent_associated_types): Extend this to support `ty::Inherent`, too.
- ty::Alias(
- ty::Projection,
- ty::AliasTy { def_id: def_id2, substs: substs2, .. },
- ) if def_id == def_id2 && substs == substs2 => hidden_ty,
_ => ty,
},
lt_op: |lt| lt,
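
The folder in the hunk above rewrites the opaque type's item bounds so that every mention of the opaque type itself (same `def_id` and `args`) is replaced by the inferred hidden type, since the bounds must ultimately hold for that hidden type. A rough standalone sketch of that replace-by-fold idea, over a toy `Ty` tree rather than the rustc types:

use std::vec::Vec;

#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Int,
    Opaque(u32), // stand-in for an opaque alias identified by a def-id
    Ref(Box<Ty>),
    Tuple(Vec<Ty>),
}

// Bottom-up fold: rebuild the tree, replacing every `Opaque(id)` that matches
// `target` with the already-inferred hidden type.
fn replace_opaque(ty: &Ty, target: u32, hidden: &Ty) -> Ty {
    match ty {
        Ty::Opaque(id) if *id == target => hidden.clone(),
        Ty::Ref(inner) => Ty::Ref(Box::new(replace_opaque(inner, target, hidden))),
        Ty::Tuple(elems) => {
            Ty::Tuple(elems.iter().map(|t| replace_opaque(t, target, hidden)).collect())
        }
        other => other.clone(),
    }
}

fn main() {
    let bound = Ty::Tuple(vec![Ty::Ref(Box::new(Ty::Opaque(7))), Ty::Int]);
    let replaced = replace_opaque(&bound, 7, &Ty::Int);
    assert_eq!(replaced, Ty::Tuple(vec![Ty::Ref(Box::new(Ty::Int)), Ty::Int]));
}
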
diff --git a/compiler/rustc_infer/src/infer/outlives/components.rs b/compiler/rustc_infer/src/infer/outlives/components.rs
index cb63d2f18..2ac9568f6 100644
--- a/compiler/rustc_infer/src/infer/outlives/components.rs
+++ b/compiler/rustc_infer/src/infer/outlives/components.rs
@@ -3,8 +3,8 @@
// RFC for reference.
use rustc_data_structures::sso::SsoHashSet;
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{GenericArg, GenericArgKind};
use smallvec::{smallvec, SmallVec};
#[derive(Debug)]
@@ -71,15 +71,15 @@ fn compute_components<'tcx>(
// in the `subtys` iterator (e.g., when encountering a
// projection).
match *ty.kind() {
- ty::FnDef(_, substs) => {
- // HACK(eddyb) ignore lifetimes found shallowly in `substs`.
- // This is inconsistent with `ty::Adt` (including all substs)
- // and with `ty::Closure` (ignoring all substs other than
+ ty::FnDef(_, args) => {
+ // HACK(eddyb) ignore lifetimes found shallowly in `args`.
+ // This is inconsistent with `ty::Adt` (including all args)
+ // and with `ty::Closure` (ignoring all args other than
// upvars, of which a `ty::FnDef` doesn't have any), but
// consistent with previous (accidental) behavior.
// See https://github.com/rust-lang/rust/issues/70917
// for further background and discussion.
- for child in substs {
+ for child in args {
match child.unpack() {
GenericArgKind::Type(ty) => {
compute_components(tcx, ty, out, visited);
@@ -97,14 +97,14 @@ fn compute_components<'tcx>(
compute_components(tcx, element, out, visited);
}
- ty::Closure(_, ref substs) => {
- let tupled_ty = substs.as_closure().tupled_upvars_ty();
+ ty::Closure(_, ref args) => {
+ let tupled_ty = args.as_closure().tupled_upvars_ty();
compute_components(tcx, tupled_ty, out, visited);
}
- ty::Generator(_, ref substs, _) => {
+ ty::Generator(_, ref args, _) => {
// Same as the closure case
- let tupled_ty = substs.as_generator().tupled_upvars_ty();
+ let tupled_ty = args.as_generator().tupled_upvars_ty();
compute_components(tcx, tupled_ty, out, visited);
// We ignore regions in the generator interior as we don't
@@ -189,7 +189,7 @@ fn compute_components<'tcx>(
}
}
-/// Collect [Component]s for *all* the substs of `parent`.
+/// Collect [Component]s for *all* the args of `parent`.
///
/// This should not be used to get the components of `parent` itself.
/// Use [push_outlives_components] instead.
@@ -201,7 +201,7 @@ pub(super) fn compute_alias_components_recursive<'tcx>(
) {
let ty::Alias(kind, alias_ty) = alias_ty.kind() else { bug!() };
let opt_variances = if *kind == ty::Opaque { tcx.variances_of(alias_ty.def_id) } else { &[] };
- for (index, child) in alias_ty.substs.iter().enumerate() {
+ for (index, child) in alias_ty.args.iter().enumerate() {
if opt_variances.get(index) == Some(&ty::Bivariant) {
continue;
}
@@ -225,7 +225,7 @@ pub(super) fn compute_alias_components_recursive<'tcx>(
}
}
-/// Collect [Component]s for *all* the substs of `parent`.
+/// Collect [Component]s for *all* the args of `parent`.
///
/// This should not be used to get the components of `parent` itself.
/// Use [push_outlives_components] instead.
diff --git a/compiler/rustc_infer/src/infer/outlives/obligations.rs b/compiler/rustc_infer/src/infer/outlives/obligations.rs
index 73df6d03f..f36802e12 100644
--- a/compiler/rustc_infer/src/infer/outlives/obligations.rs
+++ b/compiler/rustc_infer/src/infer/outlives/obligations.rs
@@ -68,8 +68,8 @@ use crate::infer::{
use crate::traits::{ObligationCause, ObligationCauseCode};
use rustc_data_structures::undo_log::UndoLogs;
use rustc_middle::mir::ConstraintCategory;
-use rustc_middle::ty::subst::GenericArgKind;
-use rustc_middle::ty::{self, Region, SubstsRef, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::GenericArgKind;
+use rustc_middle::ty::{self, GenericArgsRef, Region, Ty, TyCtxt, TypeVisitableExt};
use smallvec::smallvec;
use super::env::OutlivesEnvironment;
@@ -253,7 +253,7 @@ where
// this point it never will be
self.tcx.sess.delay_span_bug(
origin.span(),
- format!("unresolved inference variable in outlives: {:?}", v),
+ format!("unresolved inference variable in outlives: {v:?}"),
);
}
}
@@ -279,7 +279,7 @@ where
alias_ty: ty::AliasTy<'tcx>,
) {
// An optimization for a common case with opaque types.
- if alias_ty.substs.is_empty() {
+ if alias_ty.args.is_empty() {
return;
}
@@ -348,7 +348,7 @@ where
{
debug!("no declared bounds");
let opt_variances = is_opaque.then(|| self.tcx.variances_of(alias_ty.def_id));
- self.substs_must_outlive(alias_ty.substs, origin, region, opt_variances);
+ self.args_must_outlive(alias_ty.args, origin, region, opt_variances);
return;
}
@@ -395,15 +395,15 @@ where
}
#[instrument(level = "debug", skip(self))]
- fn substs_must_outlive(
+ fn args_must_outlive(
&mut self,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region<'tcx>,
opt_variances: Option<&[ty::Variance]>,
) {
let constraint = origin.to_constraint_category();
- for (index, k) in substs.iter().enumerate() {
+ for (index, k) in args.iter().enumerate() {
match k.unpack() {
GenericArgKind::Lifetime(lt) => {
let variance = if let Some(variances) = opt_variances {
diff --git a/compiler/rustc_infer/src/infer/outlives/test_type_match.rs b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
index cd2462d3c..fefa89595 100644
--- a/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
+++ b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
@@ -157,7 +157,7 @@ impl<'tcx> TypeRelation<'tcx> for Match<'tcx> {
a: T,
b: T,
) -> RelateResult<'tcx, T> {
- // Opaque types substs have lifetime parameters.
+ // Opaque type args have lifetime parameters.
// We must not check them to be equal, as we never insert anything to make them so.
if variance != ty::Bivariant { self.relate(a, b) } else { Ok(a) }
}
diff --git a/compiler/rustc_infer/src/infer/outlives/verify.rs b/compiler/rustc_infer/src/infer/outlives/verify.rs
index 1a5e2b520..4279d0ab7 100644
--- a/compiler/rustc_infer/src/infer/outlives/verify.rs
+++ b/compiler/rustc_infer/src/infer/outlives/verify.rs
@@ -179,7 +179,7 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
// this point it never will be
self.tcx.sess.delay_span_bug(
rustc_span::DUMMY_SP,
- format!("unresolved inference variable in outlives: {:?}", v),
+ format!("unresolved inference variable in outlives: {v:?}"),
);
// add a bound that never holds
VerifyBound::AnyBound(vec![])
@@ -295,7 +295,7 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
let bounds = tcx.item_bounds(alias_ty.def_id);
trace!("{:#?}", bounds.skip_binder());
bounds
- .subst_iter(tcx, alias_ty.substs)
+ .iter_instantiated(tcx, alias_ty.args)
.filter_map(|p| p.as_type_outlives_clause())
.filter_map(|p| p.no_bound_vars())
.map(|OutlivesPredicate(_, r)| r)
diff --git a/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs b/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs
index dd65f66cc..b6ff8f2f5 100644
--- a/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs
+++ b/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs
@@ -425,9 +425,11 @@ impl<'tcx> MiniGraph<'tcx> {
}
}
} else {
- for (constraint, _origin) in &region_constraints.data().constraints {
- each_constraint(constraint)
- }
+ region_constraints
+ .data()
+ .constraints
+ .keys()
+ .for_each(|constraint| each_constraint(constraint));
}
}
diff --git a/compiler/rustc_infer/src/infer/region_constraints/mod.rs b/compiler/rustc_infer/src/infer/region_constraints/mod.rs
index 613da8a0b..708c51cab 100644
--- a/compiler/rustc_infer/src/infer/region_constraints/mod.rs
+++ b/compiler/rustc_infer/src/infer/region_constraints/mod.rs
@@ -704,8 +704,8 @@ impl fmt::Debug for RegionSnapshot {
impl<'tcx> fmt::Debug for GenericKind<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
- GenericKind::Param(ref p) => write!(f, "{:?}", p),
- GenericKind::Alias(ref p) => write!(f, "{:?}", p),
+ GenericKind::Param(ref p) => write!(f, "{p:?}"),
+ GenericKind::Alias(ref p) => write!(f, "{p:?}"),
}
}
}
@@ -713,8 +713,8 @@ impl<'tcx> fmt::Debug for GenericKind<'tcx> {
impl<'tcx> fmt::Display for GenericKind<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
- GenericKind::Param(ref p) => write!(f, "{}", p),
- GenericKind::Alias(ref p) => write!(f, "{}", p),
+ GenericKind::Param(ref p) => write!(f, "{p}"),
+ GenericKind::Alias(ref p) => write!(f, "{p}"),
}
}
}
diff --git a/compiler/rustc_infer/src/infer/type_variable.rs b/compiler/rustc_infer/src/infer/type_variable.rs
index 01c11d163..bc83f8d3f 100644
--- a/compiler/rustc_infer/src/infer/type_variable.rs
+++ b/compiler/rustc_infer/src/infer/type_variable.rs
@@ -125,7 +125,7 @@ pub enum TypeVariableOriginKind {
OpaqueTypeInference(DefId),
TypeParameterDefinition(Symbol, DefId),
- /// One of the upvars or closure kind parameters in a `ClosureSubsts`
+ /// One of the upvars or closure kind parameters in a `ClosureArgs`
/// (before it has been determined).
// FIXME(eddyb) distinguish upvar inference variables from the rest.
ClosureSynthetic,
diff --git a/compiler/rustc_infer/src/traits/engine.rs b/compiler/rustc_infer/src/traits/engine.rs
index 11f434694..64b9714c7 100644
--- a/compiler/rustc_infer/src/traits/engine.rs
+++ b/compiler/rustc_infer/src/traits/engine.rs
@@ -25,7 +25,7 @@ pub trait TraitEngine<'tcx>: 'tcx {
cause,
recursion_depth: 0,
param_env,
- predicate: ty::Binder::dummy(trait_ref).without_const().to_predicate(infcx.tcx),
+ predicate: ty::Binder::dummy(trait_ref).to_predicate(infcx.tcx),
},
);
}
diff --git a/compiler/rustc_infer/src/traits/error_reporting/mod.rs b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
index 9f440f398..e72a43630 100644
--- a/compiler/rustc_infer/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
@@ -28,11 +28,11 @@ impl<'tcx> InferCtxt<'tcx> {
if !self.tcx.is_impl_trait_in_trait(trait_item_def_id) {
if let Some(span) = self.tcx.hir().span_if_local(trait_item_def_id) {
let item_name = self.tcx.item_name(impl_item_def_id.to_def_id());
- err.span_label(span, format!("definition of `{}` from trait", item_name));
+ err.span_label(span, format!("definition of `{item_name}` from trait"));
}
}
- err.span_label(error_span, format!("impl has extra requirement {}", requirement));
+ err.span_label(error_span, format!("impl has extra requirement {requirement}"));
err
}
@@ -56,7 +56,7 @@ pub fn report_object_safety_error<'tcx>(
"the trait `{}` cannot be made into an object",
trait_str
);
- err.span_label(span, format!("`{}` cannot be made into an object", trait_str));
+ err.span_label(span, format!("`{trait_str}` cannot be made into an object"));
let mut reported_violations = FxIndexSet::default();
let mut multi_span = vec![];
diff --git a/compiler/rustc_infer/src/traits/mod.rs b/compiler/rustc_infer/src/traits/mod.rs
index 626dd9359..a5b2ccce8 100644
--- a/compiler/rustc_infer/src/traits/mod.rs
+++ b/compiler/rustc_infer/src/traits/mod.rs
@@ -9,6 +9,7 @@ mod structural_impls;
pub mod util;
use std::cmp;
+use std::hash::{Hash, Hasher};
use hir::def_id::LocalDefId;
use rustc_hir as hir;
@@ -36,7 +37,7 @@ pub use rustc_middle::traits::*;
/// either identifying an `impl` (e.g., `impl Eq for i32`) that
/// satisfies the obligation, or else finding a bound that is in
/// scope. The eventual result is usually a `Selection` (defined below).
-#[derive(Clone, PartialEq, Eq, Hash)]
+#[derive(Clone)]
pub struct Obligation<'tcx, T> {
/// The reason we have to prove this thing.
pub cause: ObligationCause<'tcx>,
@@ -55,6 +56,27 @@ pub struct Obligation<'tcx, T> {
pub recursion_depth: usize,
}
+impl<'tcx, T: PartialEq> PartialEq<Obligation<'tcx, T>> for Obligation<'tcx, T> {
+ #[inline]
+ fn eq(&self, other: &Obligation<'tcx, T>) -> bool {
+ // Ignore `cause` and `recursion_depth`. This is a small performance
+ // win for a few crates, and a huge performance win for the crate in
+ // https://github.com/rust-lang/rustc-perf/pull/1680, which greatly
+ // stresses the trait system.
+ self.param_env == other.param_env && self.predicate == other.predicate
+ }
+}
+
+impl<T: Eq> Eq for Obligation<'_, T> {}
+
+impl<T: Hash> Hash for Obligation<'_, T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ // See the comment on `Obligation::eq`.
+ self.param_env.hash(state);
+ self.predicate.hash(state);
+ }
+}
+
impl<'tcx, P> From<Obligation<'tcx, P>> for solve::Goal<'tcx, P> {
fn from(value: Obligation<'tcx, P>) -> Self {
solve::Goal { param_env: value.param_env, predicate: value.predicate }
@@ -77,25 +99,9 @@ impl<'tcx> PredicateObligation<'tcx> {
recursion_depth: self.recursion_depth,
})
}
-
- pub fn without_const(mut self, tcx: TyCtxt<'tcx>) -> PredicateObligation<'tcx> {
- self.param_env = self.param_env.without_const();
- if let ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_pred)) = self.predicate.kind().skip_binder() && trait_pred.is_const_if_const() {
- self.predicate = tcx.mk_predicate(self.predicate.kind().map_bound(|_| ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_pred.without_const()))));
- }
- self
- }
}
impl<'tcx> PolyTraitObligation<'tcx> {
- /// Returns `true` if the trait predicate is considered `const` in its ParamEnv.
- pub fn is_const(&self) -> bool {
- matches!(
- (self.predicate.skip_binder().constness, self.param_env.constness()),
- (ty::BoundConstness::ConstIfConst, hir::Constness::Const)
- )
- }
-
pub fn derived_cause(
&self,
variant: impl FnOnce(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
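
The manual `PartialEq`/`Eq`/`Hash` impls added above deliberately ignore `cause` and `recursion_depth`; the invariant to preserve is that `Hash` covers exactly the fields `PartialEq` compares, otherwise hash-map lookups misbehave. A minimal self-contained sketch of the same pattern, using hypothetical stand-in types rather than the rustc ones:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hypothetical stand-ins: `key` plays the role of `param_env`/`predicate`,
// `note` plays the role of the ignored `cause`/`recursion_depth`.
#[derive(Clone, PartialEq, Eq, Hash)]
struct Key(u32);

#[derive(Clone)]
struct Obligation {
    key: Key,
    note: String,
}

impl PartialEq for Obligation {
    fn eq(&self, other: &Self) -> bool {
        // Only the semantically relevant field participates in equality.
        self.key == other.key
    }
}

impl Eq for Obligation {}

impl Hash for Obligation {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash exactly what `eq` compares, so equal values hash identically.
        self.key.hash(state);
    }
}

fn hash_of<T: Hash>(value: &T) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let a = Obligation { key: Key(1), note: "first".into() };
    let b = Obligation { key: Key(1), note: "second".into() };
    assert!(a == b);
    assert_eq!(hash_of(&a), hash_of(&b));
    println!("{} / {}: equal despite different notes", a.note, b.note);
}
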
diff --git a/compiler/rustc_infer/src/traits/project.rs b/compiler/rustc_infer/src/traits/project.rs
index e375d6119..afba2e50a 100644
--- a/compiler/rustc_infer/src/traits/project.rs
+++ b/compiler/rustc_infer/src/traits/project.rs
@@ -190,7 +190,7 @@ impl<'tcx> ProjectionCache<'_, 'tcx> {
}
let fresh_key =
map.insert(key, ProjectionCacheEntry::NormalizedTy { ty: value, complete: None });
- assert!(!fresh_key, "never started projecting `{:?}`", key);
+ assert!(!fresh_key, "never started projecting `{key:?}`");
}
/// Mark the relevant projection cache key as having its derived obligations
@@ -229,7 +229,7 @@ impl<'tcx> ProjectionCache<'_, 'tcx> {
/// be different).
pub fn ambiguous(&mut self, key: ProjectionCacheKey<'tcx>) {
let fresh = self.map().insert(key, ProjectionCacheEntry::Ambiguous);
- assert!(!fresh, "never started projecting `{:?}`", key);
+ assert!(!fresh, "never started projecting `{key:?}`");
}
/// Indicates that while trying to normalize `key`, `key` was required to
@@ -237,14 +237,14 @@ impl<'tcx> ProjectionCache<'_, 'tcx> {
/// an error here.
pub fn recur(&mut self, key: ProjectionCacheKey<'tcx>) {
let fresh = self.map().insert(key, ProjectionCacheEntry::Recur);
- assert!(!fresh, "never started projecting `{:?}`", key);
+ assert!(!fresh, "never started projecting `{key:?}`");
}
/// Indicates that trying to normalize `key` resulted in
/// error.
pub fn error(&mut self, key: ProjectionCacheKey<'tcx>) {
let fresh = self.map().insert(key, ProjectionCacheEntry::Error);
- assert!(!fresh, "never started projecting `{:?}`", key);
+ assert!(!fresh, "never started projecting `{key:?}`");
}
}
diff --git a/compiler/rustc_infer/src/traits/structural_impls.rs b/compiler/rustc_infer/src/traits/structural_impls.rs
index 1563d92af..8a7c59da0 100644
--- a/compiler/rustc_infer/src/traits/structural_impls.rs
+++ b/compiler/rustc_infer/src/traits/structural_impls.rs
@@ -38,17 +38,17 @@ impl<'tcx> fmt::Debug for traits::FulfillmentError<'tcx> {
impl<'tcx> fmt::Debug for traits::FulfillmentErrorCode<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
- super::CodeSelectionError(ref e) => write!(f, "{:?}", e),
- super::CodeProjectionError(ref e) => write!(f, "{:?}", e),
+ super::CodeSelectionError(ref e) => write!(f, "{e:?}"),
+ super::CodeProjectionError(ref e) => write!(f, "{e:?}"),
super::CodeSubtypeError(ref a, ref b) => {
- write!(f, "CodeSubtypeError({:?}, {:?})", a, b)
+ write!(f, "CodeSubtypeError({a:?}, {b:?})")
}
super::CodeConstEquateError(ref a, ref b) => {
- write!(f, "CodeConstEquateError({:?}, {:?})", a, b)
+ write!(f, "CodeConstEquateError({a:?}, {b:?})")
}
super::CodeAmbiguity { overflow: false } => write!(f, "Ambiguity"),
super::CodeAmbiguity { overflow: true } => write!(f, "Overflow"),
- super::CodeCycle(ref cycle) => write!(f, "Cycle({:?})", cycle),
+ super::CodeCycle(ref cycle) => write!(f, "Cycle({cycle:?})"),
}
}
}
diff --git a/compiler/rustc_infer/src/traits/util.rs b/compiler/rustc_infer/src/traits/util.rs
index 074ff7ec9..93dfbe63b 100644
--- a/compiler/rustc_infer/src/traits/util.rs
+++ b/compiler/rustc_infer/src/traits/util.rs
@@ -25,6 +25,13 @@ impl<'tcx> PredicateSet<'tcx> {
Self { tcx, set: Default::default() }
}
+ /// Adds a predicate to the set.
+ ///
+ /// Returns whether the predicate was newly inserted. That is:
+ /// - If the set did not previously contain this predicate, `true` is returned.
+ /// - If the set already contained this predicate, `false` is returned,
+ /// and the set is not modified: the original predicate is not replaced,
+ /// and the predicate passed as an argument is dropped.
pub fn insert(&mut self, pred: ty::Predicate<'tcx>) -> bool {
// We have to be careful here because we want
//
@@ -257,11 +264,7 @@ impl<'tcx, O: Elaboratable<'tcx>> Elaborator<'tcx, O> {
};
let obligations =
- predicates.predicates.iter().enumerate().map(|(index, &(mut clause, span))| {
- // when parent predicate is non-const, elaborate it to non-const predicates.
- if data.constness == ty::BoundConstness::NotConst {
- clause = clause.without_const(tcx);
- }
+ predicates.predicates.iter().enumerate().map(|(index, &(clause, span))| {
elaboratable.child_with_derived_cause(
clause.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref)),
span,
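
The doc comment added to `PredicateSet::insert` above spells out the usual set-insertion contract; the same return-value semantics can be seen with `std::collections::HashSet`, as in this minimal illustration:

use std::collections::HashSet;

fn main() {
    let mut set = HashSet::new();
    // Not previously present: the value is inserted and `true` is returned.
    assert!(set.insert("predicate"));
    // Already present: the set is left unchanged, the argument is dropped,
    // and `false` is returned.
    assert!(!set.insert("predicate"));
    assert_eq!(set.len(), 1);
}
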
diff --git a/compiler/rustc_interface/Cargo.toml b/compiler/rustc_interface/Cargo.toml
index 7826d42dc..ae008674d 100644
--- a/compiler/rustc_interface/Cargo.toml
+++ b/compiler/rustc_interface/Cargo.toml
@@ -6,7 +6,6 @@ edition = "2021"
[lib]
[dependencies]
-atty = "0.2.13"
libloading = "0.7.1"
tracing = "0.1"
rustc-rayon-core = { version = "0.5.0", optional = true }
@@ -16,6 +15,7 @@ rustc_attr = { path = "../rustc_attr" }
rustc_borrowck = { path = "../rustc_borrowck" }
rustc_builtin_macros = { path = "../rustc_builtin_macros" }
rustc_expand = { path = "../rustc_expand" }
+rustc_feature = { path = "../rustc_feature" }
rustc_fluent_macro = { path = "../rustc_fluent_macro" }
rustc_fs_util = { path = "../rustc_fs_util" }
rustc_macros = { path = "../rustc_macros" }
diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs
index 953c2e4b8..5b417e008 100644
--- a/compiler/rustc_interface/src/interface.rs
+++ b/compiler/rustc_interface/src/interface.rs
@@ -59,11 +59,6 @@ impl Compiler {
}
}
-#[allow(rustc::bad_opt_access)]
-pub fn set_thread_safe_mode(sopts: &config::UnstableOptions) {
- rustc_data_structures::sync::set_dyn_thread_safe_mode(sopts.threads > 1);
-}
-
/// Converts strings provided as `--cfg [cfgspec]` into a `crate_cfg`.
pub fn parse_cfgspecs(
handler: &EarlyErrorHandler,
@@ -190,7 +185,8 @@ pub fn parse_check_cfg(handler: &EarlyErrorHandler, specs: Vec<String>) -> Check
ExpectedValues::Some(FxHashSet::default())
});
- let ExpectedValues::Some(expected_values) = expected_values else {
+ let ExpectedValues::Some(expected_values) = expected_values
+ else {
bug!("`expected_values` should be a list a values")
};
@@ -255,6 +251,7 @@ pub struct Config {
pub input: Input,
pub output_dir: Option<PathBuf>,
pub output_file: Option<OutFileName>,
+ pub ice_file: Option<PathBuf>,
pub file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
pub locale_resources: &'static [&'static str],
@@ -288,6 +285,10 @@ pub struct Config {
#[allow(rustc::bad_opt_access)]
pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Send) -> R {
trace!("run_compiler");
+
+ // Set parallel mode before thread pool creation, which will create `Lock`s.
+ rustc_data_structures::sync::set_dyn_thread_safe_mode(config.opts.unstable_opts.threads > 1);
+
util::run_in_thread_pool_with_globals(
config.opts.edition,
config.opts.unstable_opts.threads,
@@ -315,6 +316,7 @@ pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Se
config.lint_caps,
config.make_codegen_backend,
registry.clone(),
+ config.ice_file,
);
if let Some(parse_sess_created) = config.parse_sess_created {
@@ -346,7 +348,11 @@ pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Se
)
}
-pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
+pub fn try_print_query_stack(
+ handler: &Handler,
+ num_frames: Option<usize>,
+ file: Option<std::fs::File>,
+) {
eprintln!("query stack during panic:");
// Be careful relying on global state here: this code is called from
@@ -358,7 +364,8 @@ pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
QueryCtxt::new(icx.tcx),
icx.query,
handler,
- num_frames
+ num_frames,
+ file,
))
} else {
0
diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs
index 6b3facd04..18a669175 100644
--- a/compiler/rustc_interface/src/passes.rs
+++ b/compiler/rustc_interface/src/passes.rs
@@ -11,6 +11,7 @@ use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal};
use rustc_errors::PResult;
use rustc_expand::base::{ExtCtxt, LintStoreExpand};
+use rustc_feature::Features;
use rustc_fs_util::try_canonicalize;
use rustc_hir::def_id::{StableCrateId, LOCAL_CRATE};
use rustc_lint::{unerased_lint_store, BufferedEarlyLint, EarlyCheckNode, LintStore};
@@ -72,43 +73,16 @@ fn count_nodes(krate: &ast::Crate) -> usize {
counter.count
}
-pub fn register_plugins<'a>(
- sess: &'a Session,
- metadata_loader: &'a dyn MetadataLoader,
- register_lints: impl Fn(&Session, &mut LintStore),
+pub(crate) fn create_lint_store(
+ sess: &Session,
+ metadata_loader: &dyn MetadataLoader,
+ register_lints: Option<impl Fn(&Session, &mut LintStore)>,
pre_configured_attrs: &[ast::Attribute],
- crate_name: Symbol,
-) -> Result<LintStore> {
- // these need to be set "early" so that expansion sees `quote` if enabled.
- let features = rustc_expand::config::features(sess, pre_configured_attrs);
- sess.init_features(features);
-
- let crate_types = util::collect_crate_types(sess, pre_configured_attrs);
- sess.init_crate_types(crate_types);
-
- let stable_crate_id = StableCrateId::new(
- crate_name,
- sess.crate_types().contains(&CrateType::Executable),
- sess.opts.cg.metadata.clone(),
- sess.cfg_version,
- );
- sess.stable_crate_id.set(stable_crate_id).expect("not yet initialized");
- rustc_incremental::prepare_session_directory(sess, crate_name, stable_crate_id)?;
-
- if sess.opts.incremental.is_some() {
- sess.time("incr_comp_garbage_collect_session_directories", || {
- if let Err(e) = rustc_incremental::garbage_collect_session_directories(sess) {
- warn!(
- "Error while trying to garbage collect incremental \
- compilation cache directory: {}",
- e
- );
- }
- });
- }
-
+) -> LintStore {
let mut lint_store = rustc_lint::new_lint_store(sess.enable_internal_lints());
- register_lints(sess, &mut lint_store);
+ if let Some(register_lints) = register_lints {
+ register_lints(sess, &mut lint_store);
+ }
let registrars = sess.time("plugin_loading", || {
plugin::load::load_plugins(sess, metadata_loader, pre_configured_attrs)
@@ -120,11 +94,12 @@ pub fn register_plugins<'a>(
}
});
- Ok(lint_store)
+ lint_store
}
fn pre_expansion_lint<'a>(
sess: &Session,
+ features: &Features,
lint_store: &LintStore,
registered_tools: &RegisteredTools,
check_node: impl EarlyCheckNode<'a>,
@@ -134,6 +109,7 @@ fn pre_expansion_lint<'a>(
|| {
rustc_lint::check_ast_node(
sess,
+ features,
true,
lint_store,
registered_tools,
@@ -152,13 +128,14 @@ impl LintStoreExpand for LintStoreExpandImpl<'_> {
fn pre_expansion_lint(
&self,
sess: &Session,
+ features: &Features,
registered_tools: &RegisteredTools,
node_id: ast::NodeId,
attrs: &[ast::Attribute],
items: &[rustc_ast::ptr::P<ast::Item>],
name: Symbol,
) {
- pre_expansion_lint(sess, self.0, registered_tools, (node_id, attrs, items), name);
+ pre_expansion_lint(sess, features, self.0, registered_tools, (node_id, attrs, items), name);
}
}
@@ -174,10 +151,18 @@ fn configure_and_expand(
) -> ast::Crate {
let tcx = resolver.tcx();
let sess = tcx.sess;
+ let features = tcx.features();
let lint_store = unerased_lint_store(tcx);
let crate_name = tcx.crate_name(LOCAL_CRATE);
let lint_check_node = (&krate, pre_configured_attrs);
- pre_expansion_lint(sess, lint_store, tcx.registered_tools(()), lint_check_node, crate_name);
+ pre_expansion_lint(
+ sess,
+ features,
+ lint_store,
+ tcx.registered_tools(()),
+ lint_check_node,
+ crate_name,
+ );
rustc_builtin_macros::register_builtin_macros(resolver);
let num_standard_library_imports = sess.time("crate_injection", || {
@@ -186,6 +171,7 @@ fn configure_and_expand(
pre_configured_attrs,
resolver,
sess,
+ features,
)
});
@@ -225,16 +211,15 @@ fn configure_and_expand(
}
// Create the config for macro expansion
- let features = sess.features_untracked();
let recursion_limit = get_recursion_limit(pre_configured_attrs, sess);
let cfg = rustc_expand::expand::ExpansionConfig {
- features: Some(features),
+ crate_name: crate_name.to_string(),
+ features,
recursion_limit,
trace_mac: sess.opts.unstable_opts.trace_macros,
should_test: sess.is_test_crate(),
span_debug: sess.opts.unstable_opts.span_debug,
proc_macro_backtrace: sess.opts.unstable_opts.proc_macro_backtrace,
- ..rustc_expand::expand::ExpansionConfig::default(crate_name.to_string())
};
let lint_store = LintStoreExpandImpl(lint_store);
@@ -268,14 +253,19 @@ fn configure_and_expand(
});
sess.time("maybe_building_test_harness", || {
- rustc_builtin_macros::test_harness::inject(&mut krate, sess, resolver)
+ rustc_builtin_macros::test_harness::inject(&mut krate, sess, features, resolver)
});
let has_proc_macro_decls = sess.time("AST_validation", || {
- rustc_ast_passes::ast_validation::check_crate(sess, &krate, resolver.lint_buffer())
+ rustc_ast_passes::ast_validation::check_crate(
+ sess,
+ features,
+ &krate,
+ resolver.lint_buffer(),
+ )
});
- let crate_types = sess.crate_types();
+ let crate_types = tcx.crate_types();
let is_executable_crate = crate_types.contains(&CrateType::Executable);
let is_proc_macro_crate = crate_types.contains(&CrateType::ProcMacro);
@@ -297,6 +287,7 @@ fn configure_and_expand(
rustc_builtin_macros::proc_macro_harness::inject(
&mut krate,
sess,
+ features,
resolver,
is_proc_macro_crate,
has_proc_macro_decls,
@@ -327,7 +318,7 @@ fn early_lint_checks(tcx: TyCtxt<'_>, (): ()) {
// Needs to go *after* expansion to be able to check the results of macro expansion.
sess.time("complete_gated_feature_checking", || {
- rustc_ast_passes::feature_gate::check_crate(&krate, sess);
+ rustc_ast_passes::feature_gate::check_crate(&krate, sess, tcx.features());
});
// Add all buffered lints from the `ParseSess` to the `Session`.
@@ -356,6 +347,7 @@ fn early_lint_checks(tcx: TyCtxt<'_>, (): ()) {
let lint_store = unerased_lint_store(tcx);
rustc_lint::check_ast_node(
sess,
+ tcx.features(),
false,
lint_store,
tcx.registered_tools(()),
@@ -367,11 +359,12 @@ fn early_lint_checks(tcx: TyCtxt<'_>, (): ()) {
// Returns all the paths that correspond to generated files.
fn generated_output_paths(
- sess: &Session,
+ tcx: TyCtxt<'_>,
outputs: &OutputFilenames,
exact_name: bool,
crate_name: Symbol,
) -> Vec<PathBuf> {
+ let sess = tcx.sess;
let mut out_filenames = Vec::new();
for output_type in sess.opts.output_types.keys() {
let out_filename = outputs.path(*output_type);
@@ -380,7 +373,7 @@ fn generated_output_paths(
// If the filename has been overridden using `-o`, it will not be modified
// by appending `.rlib`, `.exe`, etc., so we can skip this transformation.
OutputType::Exe if !exact_name => {
- for crate_type in sess.crate_types().iter() {
+ for crate_type in tcx.crate_types().iter() {
let p = filename_for_input(sess, *crate_type, crate_name, outputs);
out_filenames.push(p.as_path().to_path_buf());
}
@@ -613,7 +606,7 @@ fn output_filenames(tcx: TyCtxt<'_>, (): ()) -> Arc<OutputFilenames> {
let outputs = util::build_output_filenames(&krate.attrs, sess);
let output_paths =
- generated_output_paths(sess, &outputs, sess.io.output_file.is_some(), crate_name);
+ generated_output_paths(tcx, &outputs, sess.io.output_file.is_some(), crate_name);
// Ensure the source file isn't accidentally overwritten during compilation.
if let Some(ref input_path) = sess.io.input.opt_path() {
@@ -691,6 +684,8 @@ pub static DEFAULT_EXTERN_QUERY_PROVIDERS: LazyLock<ExternProviders> = LazyLock:
pub fn create_global_ctxt<'tcx>(
compiler: &'tcx Compiler,
+ crate_types: Vec<CrateType>,
+ stable_crate_id: StableCrateId,
lint_store: Lrc<LintStore>,
dep_graph: DepGraph,
untracked: Untracked,
@@ -723,6 +718,8 @@ pub fn create_global_ctxt<'tcx>(
gcx_cell.get_or_init(move || {
TyCtxt::create_global_ctxt(
sess,
+ crate_types,
+ stable_crate_id,
lint_store,
arena,
hir_arena,
@@ -846,10 +843,11 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
},
{
sess.time("lint_checking", || {
- rustc_lint::check_crate(tcx, || {
- rustc_lint::BuiltinCombinedLateLintPass::new()
- });
+ rustc_lint::check_crate(tcx);
});
+ },
+ {
+ tcx.ensure().clashing_extern_declarations(());
}
);
},
diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs
index 8c4cdc669..fc71c6c7e 100644
--- a/compiler/rustc_interface/src/queries.rs
+++ b/compiler/rustc_interface/src/queries.rs
@@ -1,23 +1,21 @@
use crate::errors::{FailedWritingFile, RustcErrorFatal, RustcErrorUnexpectedAnnotation};
use crate::interface::{Compiler, Result};
-use crate::passes;
+use crate::{passes, util};
use rustc_ast as ast;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_codegen_ssa::CodegenResults;
-use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::steal::Steal;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::{AppendOnlyIndexVec, Lrc, OnceCell, RwLock, WorkerLocal};
-use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::def_id::{StableCrateId, CRATE_DEF_ID, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
use rustc_incremental::DepGraphFuture;
-use rustc_lint::LintStore;
use rustc_metadata::creader::CStore;
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::DepGraph;
use rustc_middle::ty::{GlobalCtxt, TyCtxt};
-use rustc_session::config::{self, OutputFilenames, OutputType};
+use rustc_session::config::{self, CrateType, OutputFilenames, OutputType};
use rustc_session::cstore::Untracked;
use rustc_session::{output::find_crate_name, Session};
use rustc_span::symbol::sym;
@@ -85,12 +83,8 @@ pub struct Queries<'tcx> {
arena: WorkerLocal<Arena<'tcx>>,
hir_arena: WorkerLocal<rustc_hir::Arena<'tcx>>,
- dep_graph_future: Query<Option<DepGraphFuture>>,
parse: Query<ast::Crate>,
pre_configure: Query<(ast::Crate, ast::AttrVec)>,
- crate_name: Query<Symbol>,
- register_plugins: Query<(ast::Crate, ast::AttrVec, Lrc<LintStore>)>,
- dep_graph: Query<DepGraph>,
// This just points to what's in `gcx_cell`.
gcx: Query<&'tcx GlobalCtxt<'tcx>>,
}
@@ -102,12 +96,8 @@ impl<'tcx> Queries<'tcx> {
gcx_cell: OnceCell::new(),
arena: WorkerLocal::new(|_| Arena::default()),
hir_arena: WorkerLocal::new(|_| rustc_hir::Arena::default()),
- dep_graph_future: Default::default(),
parse: Default::default(),
pre_configure: Default::default(),
- crate_name: Default::default(),
- register_plugins: Default::default(),
- dep_graph: Default::default(),
gcx: Default::default(),
}
}
@@ -119,13 +109,6 @@ impl<'tcx> Queries<'tcx> {
self.compiler.codegen_backend()
}
- fn dep_graph_future(&self) -> Result<QueryResult<'_, Option<DepGraphFuture>>> {
- self.dep_graph_future.compute(|| {
- let sess = self.session();
- Ok(sess.opts.build_dep_graph().then(|| rustc_incremental::load_dep_graph(sess)))
- })
- }
-
pub fn parse(&self) -> Result<QueryResult<'_, ast::Crate>> {
self.parse
.compute(|| passes::parse(self.session()).map_err(|mut parse_error| parse_error.emit()))
@@ -148,75 +131,73 @@ impl<'tcx> Queries<'tcx> {
})
}
- pub fn register_plugins(
+ fn dep_graph_future(
&self,
- ) -> Result<QueryResult<'_, (ast::Crate, ast::AttrVec, Lrc<LintStore>)>> {
- self.register_plugins.compute(|| {
- let crate_name = *self.crate_name()?.borrow();
- let (krate, pre_configured_attrs) = self.pre_configure()?.steal();
-
- let empty: &(dyn Fn(&Session, &mut LintStore) + Sync + Send) = &|_, _| {};
- let lint_store = passes::register_plugins(
- self.session(),
- &*self.codegen_backend().metadata_loader(),
- self.compiler.register_lints.as_deref().unwrap_or_else(|| empty),
- &pre_configured_attrs,
- crate_name,
- )?;
-
- // Compute the dependency graph (in the background). We want to do
- // this as early as possible, to give the DepGraph maximum time to
- // load before dep_graph() is called, but it also can't happen
- // until after rustc_incremental::prepare_session_directory() is
- // called, which happens within passes::register_plugins().
- self.dep_graph_future().ok();
+ crate_name: Symbol,
+ stable_crate_id: StableCrateId,
+ ) -> Result<Option<DepGraphFuture>> {
+ let sess = self.session();
+
+ // `load_dep_graph` can only be called after `prepare_session_directory`.
+ rustc_incremental::prepare_session_directory(sess, crate_name, stable_crate_id)?;
+ let res = sess.opts.build_dep_graph().then(|| rustc_incremental::load_dep_graph(sess));
+
+ if sess.opts.incremental.is_some() {
+ sess.time("incr_comp_garbage_collect_session_directories", || {
+ if let Err(e) = rustc_incremental::garbage_collect_session_directories(sess) {
+ warn!(
+ "Error while trying to garbage collect incremental \
+ compilation cache directory: {}",
+ e
+ );
+ }
+ });
+ }
- Ok((krate, pre_configured_attrs, Lrc::new(lint_store)))
- })
+ Ok(res)
}
- fn crate_name(&self) -> Result<QueryResult<'_, Symbol>> {
- self.crate_name.compute(|| {
- Ok({
- let pre_configure_result = self.pre_configure()?;
- let (_, pre_configured_attrs) = &*pre_configure_result.borrow();
- // parse `#[crate_name]` even if `--crate-name` was passed, to make sure it matches.
- find_crate_name(self.session(), pre_configured_attrs)
+ fn dep_graph(&self, dep_graph_future: Option<DepGraphFuture>) -> DepGraph {
+ dep_graph_future
+ .and_then(|future| {
+ let sess = self.session();
+ let (prev_graph, prev_work_products) =
+ sess.time("blocked_on_dep_graph_loading", || future.open().open(sess));
+ rustc_incremental::build_dep_graph(sess, prev_graph, prev_work_products)
})
- })
- }
-
- fn dep_graph(&self) -> Result<QueryResult<'_, DepGraph>> {
- self.dep_graph.compute(|| {
- let sess = self.session();
- let future_opt = self.dep_graph_future()?.steal();
- let dep_graph = future_opt
- .and_then(|future| {
- let (prev_graph, mut prev_work_products) =
- sess.time("blocked_on_dep_graph_loading", || future.open().open(sess));
- // Convert from UnordMap to FxIndexMap by sorting
- let prev_work_product_ids =
- prev_work_products.items().map(|x| *x.0).into_sorted_stable_ord();
- let prev_work_products = prev_work_product_ids
- .into_iter()
- .map(|x| (x, prev_work_products.remove(&x).unwrap()))
- .collect::<FxIndexMap<_, _>>();
- rustc_incremental::build_dep_graph(sess, prev_graph, prev_work_products)
- })
- .unwrap_or_else(DepGraph::new_disabled);
- Ok(dep_graph)
- })
+ .unwrap_or_else(DepGraph::new_disabled)
}
pub fn global_ctxt(&'tcx self) -> Result<QueryResult<'_, &'tcx GlobalCtxt<'tcx>>> {
self.gcx.compute(|| {
- let crate_name = *self.crate_name()?.borrow();
- let (krate, pre_configured_attrs, lint_store) = self.register_plugins()?.steal();
-
let sess = self.session();
+ let (krate, pre_configured_attrs) = self.pre_configure()?.steal();
+
+ // parse `#[crate_name]` even if `--crate-name` was passed, to make sure it matches.
+ let crate_name = find_crate_name(sess, &pre_configured_attrs);
+ let crate_types = util::collect_crate_types(sess, &pre_configured_attrs);
+ let stable_crate_id = StableCrateId::new(
+ crate_name,
+ crate_types.contains(&CrateType::Executable),
+ sess.opts.cg.metadata.clone(),
+ sess.cfg_version,
+ );
- let cstore = RwLock::new(Box::new(CStore::new(sess)) as _);
- let definitions = RwLock::new(Definitions::new(sess.local_stable_crate_id()));
+ // Compute the dependency graph (in the background). We want to do this as early as
+ // possible, to give the DepGraph maximum time to load before `dep_graph` is called.
+ let dep_graph_future = self.dep_graph_future(crate_name, stable_crate_id)?;
+
+ let lint_store = Lrc::new(passes::create_lint_store(
+ sess,
+ &*self.codegen_backend().metadata_loader(),
+ self.compiler.register_lints.as_deref(),
+ &pre_configured_attrs,
+ ));
+ let cstore = RwLock::new(Box::new(CStore::new(
+ self.codegen_backend().metadata_loader(),
+ stable_crate_id,
+ )) as _);
+ let definitions = RwLock::new(Definitions::new(stable_crate_id));
let source_span = AppendOnlyIndexVec::new();
let _id = source_span.push(krate.spans.inner_span);
debug_assert_eq!(_id, CRATE_DEF_ID);
@@ -224,8 +205,10 @@ impl<'tcx> Queries<'tcx> {
let qcx = passes::create_global_ctxt(
self.compiler,
+ crate_types,
+ stable_crate_id,
lint_store,
- self.dep_graph()?.steal(),
+ self.dep_graph(dep_graph_future),
untracked,
&self.gcx_cell,
&self.arena,
@@ -237,11 +220,10 @@ impl<'tcx> Queries<'tcx> {
feed.crate_name(crate_name);
let feed = tcx.feed_unit_query();
- feed.crate_for_resolver(tcx.arena.alloc(Steal::new((krate, pre_configured_attrs))));
- feed.metadata_loader(
- tcx.arena.alloc(Steal::new(self.codegen_backend().metadata_loader())),
+ feed.features_query(
+ tcx.arena.alloc(rustc_expand::config::features(sess, &pre_configured_attrs)),
);
- feed.features_query(tcx.sess.features_untracked());
+ feed.crate_for_resolver(tcx.arena.alloc(Steal::new((krate, pre_configured_attrs))));
});
Ok(qcx)
})
@@ -303,7 +285,7 @@ impl<'tcx> Queries<'tcx> {
let (crate_hash, prepare_outputs, dep_graph) = self.global_ctxt()?.enter(|tcx| {
(
- if tcx.sess.needs_crate_hash() { Some(tcx.crate_hash(LOCAL_CRATE)) } else { None },
+ if tcx.needs_crate_hash() { Some(tcx.crate_hash(LOCAL_CRATE)) } else { None },
tcx.output_filenames(()).clone(),
tcx.dep_graph.clone(),
)
diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs
index 09141afd1..e3d66d183 100644
--- a/compiler/rustc_interface/src/tests.rs
+++ b/compiler/rustc_interface/src/tests.rs
@@ -67,6 +67,7 @@ fn mk_session(handler: &mut EarlyErrorHandler, matches: getopts::Matches) -> (Se
None,
None,
"",
+ None,
);
(sess, cfg)
}
@@ -714,6 +715,7 @@ fn test_unstable_options_tracking_hash() {
untracked!(perf_stats, true);
// `pre_link_arg` is omitted because it just forwards to `pre_link_args`.
untracked!(pre_link_args, vec![String::from("abc"), String::from("def")]);
+ untracked!(print_codegen_stats, true);
untracked!(print_llvm_passes, true);
untracked!(print_mono_items, Some(String::from("abc")));
untracked!(print_type_sizes, true);
@@ -738,6 +740,7 @@ fn test_unstable_options_tracking_hash() {
untracked!(unstable_options, true);
untracked!(validate_mir, true);
untracked!(verbose, true);
+ untracked!(write_long_types_to_disk, false);
// tidy-alphabetical-end
macro_rules! tracked {
diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs
index 035ea2414..ad35dbbc8 100644
--- a/compiler/rustc_interface/src/util.rs
+++ b/compiler/rustc_interface/src/util.rs
@@ -70,6 +70,7 @@ pub fn create_session(
Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>,
>,
descriptions: Registry,
+ ice_file: Option<PathBuf>,
) -> (Session, Box<dyn CodegenBackend>) {
let codegen_backend = if let Some(make_codegen_backend) = make_codegen_backend {
make_codegen_backend(&sopts)
@@ -111,6 +112,7 @@ pub fn create_session(
file_loader,
target_override,
rustc_version_str().unwrap_or("unknown"),
+ ice_file,
);
codegen_backend.init(&sess);
@@ -517,7 +519,8 @@ fn multiple_output_types_to_stdout(
output_types: &OutputTypes,
single_output_file_is_stdout: bool,
) -> bool {
- if atty::is(atty::Stream::Stdout) {
+ use std::io::IsTerminal;
+ if std::io::stdout().is_terminal() {
// If stdout is a tty, check if multiple text output types are
// specified by `--emit foo=- --emit bar=-` or `-o - --emit foo,bar`
let named_text_types = output_types
diff --git a/compiler/rustc_lexer/Cargo.toml b/compiler/rustc_lexer/Cargo.toml
index 23294dc2e..2211ac1c8 100644
--- a/compiler/rustc_lexer/Cargo.toml
+++ b/compiler/rustc_lexer/Cargo.toml
@@ -16,7 +16,11 @@ Rust lexer used by rustc. No stability guarantees are provided.
# Note that this crate purposefully does not depend on other rustc crates
[dependencies]
unicode-xid = "0.2.0"
-unic-emoji-char = "0.9.0"
+
+[dependencies.unicode-properties]
+version = "0.1.0"
+default-features = false
+features = ["emoji"]
[dev-dependencies]
expect-test = "1.4.0"
diff --git a/compiler/rustc_lexer/src/cursor.rs b/compiler/rustc_lexer/src/cursor.rs
index eceef5980..aba7f9548 100644
--- a/compiler/rustc_lexer/src/cursor.rs
+++ b/compiler/rustc_lexer/src/cursor.rs
@@ -24,6 +24,10 @@ impl<'a> Cursor<'a> {
}
}
+ pub fn as_str(&self) -> &'a str {
+ self.chars.as_str()
+ }
+
/// Returns the last eaten symbol (or `'\0'` in release builds).
/// (For debug assertions only.)
pub(crate) fn prev(&self) -> char {
diff --git a/compiler/rustc_lexer/src/lib.rs b/compiler/rustc_lexer/src/lib.rs
index 29335a8c0..43dfd34a6 100644
--- a/compiler/rustc_lexer/src/lib.rs
+++ b/compiler/rustc_lexer/src/lib.rs
@@ -34,6 +34,7 @@ pub use crate::cursor::Cursor;
use self::LiteralKind::*;
use self::TokenKind::*;
use crate::cursor::EOF_CHAR;
+use unicode_properties::UnicodeEmoji;
/// Parsed token.
/// It doesn't contain information about data that has been parsed,
@@ -367,6 +368,13 @@ impl Cursor<'_> {
Some(|terminated| Byte { terminated }),
),
+ // c-string literal, raw c-string literal or identifier.
+ 'c' => self.c_or_byte_string(
+ |terminated| CStr { terminated },
+ |n_hashes| RawCStr { n_hashes },
+ None,
+ ),
+
// Identifier (this should be checked after other variant that can
// start as identifier).
c if is_id_start(c) => self.ident_or_unknown_prefix(),
@@ -421,9 +429,7 @@ impl Cursor<'_> {
Literal { kind, suffix_start }
}
// Identifier starting with an emoji. Only lexed for graceful error recovery.
- c if !c.is_ascii() && unic_emoji_char::is_emoji(c) => {
- self.fake_ident_or_unknown_prefix()
- }
+ c if !c.is_ascii() && c.is_emoji_char() => self.fake_ident_or_unknown_prefix(),
_ => Unknown,
};
let res = Token::new(token_kind, self.pos_within_token());
@@ -507,9 +513,7 @@ impl Cursor<'_> {
// we see a prefix here, it is definitely an unknown prefix.
match self.first() {
'#' | '"' | '\'' => UnknownPrefix,
- c if !c.is_ascii() && unic_emoji_char::is_emoji(c) => {
- self.fake_ident_or_unknown_prefix()
- }
+ c if !c.is_ascii() && c.is_emoji_char() => self.fake_ident_or_unknown_prefix(),
_ => Ident,
}
}
@@ -518,7 +522,7 @@ impl Cursor<'_> {
// Start is already eaten, eat the rest of identifier.
self.eat_while(|c| {
unicode_xid::UnicodeXID::is_xid_continue(c)
- || (!c.is_ascii() && unic_emoji_char::is_emoji(c))
+ || (!c.is_ascii() && c.is_emoji_char())
|| c == '\u{200d}'
});
// Known prefixes must have been handled earlier. So if
diff --git a/compiler/rustc_lexer/src/unescape.rs b/compiler/rustc_lexer/src/unescape.rs
index c9ad54d8d..717b042fb 100644
--- a/compiler/rustc_lexer/src/unescape.rs
+++ b/compiler/rustc_lexer/src/unescape.rs
@@ -372,7 +372,7 @@ where
callback(start..end, EscapeError::MultipleSkippedLinesWarning);
}
let tail = &tail[first_non_space..];
- if let Some(c) = tail.chars().nth(0) {
+ if let Some(c) = tail.chars().next() {
if c.is_whitespace() {
// For error reporting, we would like the span to contain the character that was not
// skipped. The +1 is necessary to account for the leading \ that started the escape.
diff --git a/compiler/rustc_lint/messages.ftl b/compiler/rustc_lint/messages.ftl
index 22e22c833..c4a7f7178 100644
--- a/compiler/rustc_lint/messages.ftl
+++ b/compiler/rustc_lint/messages.ftl
@@ -72,6 +72,9 @@ lint_builtin_incomplete_features = the feature `{$name}` is incomplete and may n
.note = see issue #{$n} <https://github.com/rust-lang/rust/issues/{$n}> for more information
.help = consider using `min_{$name}` instead, which is more stable and complete
+lint_builtin_internal_features = the feature `{$name}` is internal to the compiler or standard library
+ .note = using it is strongly discouraged
+
lint_builtin_keyword_idents = `{$kw}` is a keyword in the {$next} edition
.suggestion = you can use a raw identifier to stay compatible
@@ -127,8 +130,6 @@ lint_builtin_unexpected_cli_config_name = unexpected `{$name}` as condition name
lint_builtin_unexpected_cli_config_value = unexpected condition value `{$value}` for condition name `{$name}`
.help = was set with `--cfg` but isn't in the `--check-cfg` expected values
-lint_builtin_unnameable_test_items = cannot test inner items
-
lint_builtin_unpermitted_type_init_label = this code causes undefined behavior when executed
lint_builtin_unpermitted_type_init_label_suggestion = help: use `MaybeUninit<T>` instead, and only call `assume_init` after initialization is done
@@ -166,8 +167,9 @@ lint_check_name_warning = {$msg}
lint_command_line_source = `forbid` lint level was set on command line
-lint_confusable_identifier_pair = identifier pair considered confusable between `{$existing_sym}` and `{$sym}`
- .label = this is where the previous identifier occurred
+lint_confusable_identifier_pair = found both `{$existing_sym}` and `{$sym}` as identifiers, which look alike
+ .current_use = this identifier can be confused with `{$existing_sym}`
+ .other_use = other identifier used here
lint_cstring_ptr = getting the inner pointer of a temporary `CString`
.as_ptr_label = this pointer will be invalid
@@ -315,7 +317,11 @@ lint_invalid_nan_comparisons_eq_ne = incorrect NaN comparison, NaN cannot be dir
lint_invalid_nan_comparisons_lt_le_gt_ge = incorrect NaN comparison, NaN is not orderable
-lint_invalid_reference_casting = casting `&T` to `&mut T` is undefined behavior, even if the reference is unused, consider instead using an `UnsafeCell`
+lint_invalid_reference_casting_assign_to_ref = assigning to `&T` is undefined behavior, consider using an `UnsafeCell`
+ .label = casting happened here
+
+lint_invalid_reference_casting_borrow_as_mut = casting `&T` to `&mut T` is undefined behavior, even if the reference is unused, consider instead using an `UnsafeCell`
+ .label = casting happened here
lint_lintpass_by_hand = implementing `LintPass` by hand
.help = try using `declare_lint_pass!` or `impl_lint_pass!` instead
@@ -407,8 +413,8 @@ lint_non_upper_case_global = {$sort} `{$name}` should have an upper case name
.label = should have an UPPER_CASE name
lint_noop_method_call = call to `.{$method}()` on a reference in this situation does nothing
- .label = unnecessary method call
- .note = the type `{$receiver_ty}` which `{$method}` is being called on is the same as the type returned from `{$method}`, so the method call does not do anything and can be removed
+ .suggestion = remove this redundant call
+ .note = the type `{$orig_ty}` does not implement `{$trait_}`, so calling `{$method}` on `&{$orig_ty}` copies the reference, which does not do anything and can be removed
lint_only_cast_u8_to_char = only `u8` can be cast into `char`
.suggestion = use a `char` literal instead
@@ -447,6 +453,13 @@ lint_path_statement_drop = path statement drops value
lint_path_statement_no_effect = path statement with no effect
+lint_ptr_null_checks_fn_ptr = function pointers are not nullable, so checking them for null will always return false
+ .help = wrap the function pointer inside an `Option` and use `Option::is_none` to check for a null pointer value
+ .label = expression has type `{$orig_ty}`
+
+lint_ptr_null_checks_ref = references are not nullable, so checking them for null will always return false
+ .label = expression has type `{$orig_ty}`
+
lint_query_instability = using `{$query}` can result in unstable query results
.note = if you believe this case to be fine, allow this lint and add a comment explaining your rationale
diff --git a/compiler/rustc_lint/src/array_into_iter.rs b/compiler/rustc_lint/src/array_into_iter.rs
index bccb0a94e..d0967ba56 100644
--- a/compiler/rustc_lint/src/array_into_iter.rs
+++ b/compiler/rustc_lint/src/array_into_iter.rs
@@ -81,7 +81,7 @@ impl<'tcx> LateLintPass<'tcx> for ArrayIntoIter {
let adjustments = cx.typeck_results().expr_adjustments(receiver_arg);
let Some(Adjustment { kind: Adjust::Borrow(_), target }) = adjustments.last() else {
- return
+ return;
};
let types =
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index b821933e9..4b6917fdf 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -24,24 +24,22 @@ use crate::fluent_generated as fluent;
use crate::{
errors::BuiltinEllipsisInclusiveRangePatterns,
lints::{
- BuiltinAnonymousParams, BuiltinBoxPointers, BuiltinClashingExtern,
- BuiltinClashingExternSub, BuiltinConstNoMangle, BuiltinDeprecatedAttrLink,
- BuiltinDeprecatedAttrLinkSuggestion, BuiltinDeprecatedAttrUsed, BuiltinDerefNullptr,
- BuiltinEllipsisInclusiveRangePatternsLint, BuiltinExplicitOutlives,
- BuiltinExplicitOutlivesSuggestion, BuiltinIncompleteFeatures,
- BuiltinIncompleteFeaturesHelp, BuiltinIncompleteFeaturesNote, BuiltinKeywordIdents,
+ BuiltinAnonymousParams, BuiltinBoxPointers, BuiltinConstNoMangle,
+ BuiltinDeprecatedAttrLink, BuiltinDeprecatedAttrLinkSuggestion, BuiltinDeprecatedAttrUsed,
+ BuiltinDerefNullptr, BuiltinEllipsisInclusiveRangePatternsLint, BuiltinExplicitOutlives,
+ BuiltinExplicitOutlivesSuggestion, BuiltinFeatureIssueNote, BuiltinIncompleteFeatures,
+ BuiltinIncompleteFeaturesHelp, BuiltinInternalFeatures, BuiltinKeywordIdents,
BuiltinMissingCopyImpl, BuiltinMissingDebugImpl, BuiltinMissingDoc,
BuiltinMutablesTransmutes, BuiltinNoMangleGeneric, BuiltinNonShorthandFieldPatterns,
BuiltinSpecialModuleNameUsed, BuiltinTrivialBounds, BuiltinTypeAliasGenericBounds,
BuiltinTypeAliasGenericBoundsSuggestion, BuiltinTypeAliasWhereClause,
BuiltinUnexpectedCliConfigName, BuiltinUnexpectedCliConfigValue,
- BuiltinUngatedAsyncFnTrackCaller, BuiltinUnnameableTestItems, BuiltinUnpermittedTypeInit,
+ BuiltinUngatedAsyncFnTrackCaller, BuiltinUnpermittedTypeInit,
BuiltinUnpermittedTypeInitSub, BuiltinUnreachablePub, BuiltinUnsafe,
BuiltinUnstableFeatures, BuiltinUnusedDocComment, BuiltinUnusedDocCommentSub,
BuiltinWhileTrue, SuggestChangingAssocTypes,
},
- types::{transparent_newtype_field, CItemKind},
- EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext,
+ EarlyContext, EarlyLintPass, LateContext, LateLintPass, Level, LintContext,
};
use hir::IsAsync;
use rustc_ast::attr;
@@ -49,29 +47,29 @@ use rustc_ast::tokenstream::{TokenStream, TokenTree};
use rustc_ast::visit::{FnCtxt, FnKind};
use rustc_ast::{self as ast, *};
use rustc_ast_pretty::pprust::{self, expr_to_string};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_errors::{Applicability, DecorateLint, MultiSpan};
use rustc_feature::{deprecated_attributes, AttributeGate, BuiltinAttribute, GateIssue, Stability};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
-use rustc_hir::def_id::{DefId, LocalDefId, LocalDefIdSet, CRATE_DEF_ID};
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
use rustc_hir::intravisit::FnKind as HirFnKind;
-use rustc_hir::{Body, FnDecl, ForeignItemKind, GenericParamKind, Node, PatKind, PredicateOrigin};
+use rustc_hir::{Body, FnDecl, GenericParamKind, Node, PatKind, PredicateOrigin};
use rustc_middle::lint::in_external_macro;
-use rustc_middle::ty::layout::{LayoutError, LayoutOf};
+use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::GenericArgKind;
+use rustc_middle::ty::ToPredicate;
use rustc_middle::ty::TypeVisitableExt;
-use rustc_middle::ty::{self, Instance, Ty, TyCtxt, VariantDef};
+use rustc_middle::ty::{self, Ty, TyCtxt, VariantDef};
use rustc_session::config::ExpectedValues;
use rustc_session::lint::{BuiltinLintDiagnostics, FutureIncompatibilityReason};
use rustc_span::edition::Edition;
use rustc_span::source_map::Spanned;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{BytePos, InnerSpan, Span};
-use rustc_target::abi::{Abi, FIRST_VARIANT};
+use rustc_target::abi::Abi;
use rustc_trait_selection::infer::{InferCtxtExt, TyCtxtInferExt};
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
use rustc_trait_selection::traits::{self, misc::type_allowed_to_implement_copy};
use crate::nonstandard_style::{method_context, MethodLateContext};
@@ -181,9 +179,11 @@ impl<'tcx> LateLintPass<'tcx> for BoxPointers {
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Enum(..)
| hir::ItemKind::Struct(..)
- | hir::ItemKind::Union(..) => {
- self.check_heap_type(cx, it.span, cx.tcx.type_of(it.owner_id).subst_identity())
- }
+ | hir::ItemKind::Union(..) => self.check_heap_type(
+ cx,
+ it.span,
+ cx.tcx.type_of(it.owner_id).instantiate_identity(),
+ ),
_ => (),
}
@@ -194,7 +194,7 @@ impl<'tcx> LateLintPass<'tcx> for BoxPointers {
self.check_heap_type(
cx,
field.span,
- cx.tcx.type_of(field.def_id).subst_identity(),
+ cx.tcx.type_of(field.def_id).instantiate_identity(),
);
}
}
@@ -459,10 +459,7 @@ declare_lint! {
report_in_external_macro
}
-pub struct MissingDoc {
- /// Stack of whether `#[doc(hidden)]` is set at each level which has lint attributes.
- doc_hidden_stack: Vec<bool>,
-}
+pub struct MissingDoc;
impl_lint_pass!(MissingDoc => [MISSING_DOCS]);
@@ -491,14 +488,6 @@ fn has_doc(attr: &ast::Attribute) -> bool {
}
impl MissingDoc {
- pub fn new() -> MissingDoc {
- MissingDoc { doc_hidden_stack: vec![false] }
- }
-
- fn doc_hidden(&self) -> bool {
- *self.doc_hidden_stack.last().expect("empty doc_hidden_stack")
- }
-
fn check_missing_docs_attrs(
&self,
cx: &LateContext<'_>,
@@ -512,11 +501,6 @@ impl MissingDoc {
return;
}
- // `#[doc(hidden)]` disables missing_docs check.
- if self.doc_hidden() {
- return;
- }
-
// Only check publicly-visible items, using the result from the privacy pass.
// It's an option so the crate root can also use this function (it doesn't
// have a `NodeId`).
@@ -539,23 +523,6 @@ impl MissingDoc {
}
impl<'tcx> LateLintPass<'tcx> for MissingDoc {
- #[inline]
- fn enter_lint_attrs(&mut self, _cx: &LateContext<'_>, attrs: &[ast::Attribute]) {
- let doc_hidden = self.doc_hidden()
- || attrs.iter().any(|attr| {
- attr.has_name(sym::doc)
- && match attr.meta_item_list() {
- None => false,
- Some(l) => attr::list_contains_name(&l, sym::hidden),
- }
- });
- self.doc_hidden_stack.push(doc_hidden);
- }
-
- fn exit_lint_attrs(&mut self, _: &LateContext<'_>, _attrs: &[ast::Attribute]) {
- self.doc_hidden_stack.pop().expect("empty doc_hidden_stack");
- }
-
fn check_crate(&mut self, cx: &LateContext<'_>) {
self.check_missing_docs_attrs(cx, CRATE_DEF_ID, "the", "crate");
}
@@ -591,7 +558,7 @@ impl<'tcx> LateLintPass<'tcx> for MissingDoc {
// If the method is an impl for an item with docs_hidden, don't doc.
MethodLateContext::PlainImpl => {
let parent = cx.tcx.hir().get_parent_item(impl_item.hir_id());
- let impl_ty = cx.tcx.type_of(parent).subst_identity();
+ let impl_ty = cx.tcx.type_of(parent).instantiate_identity();
let outerdef = match impl_ty.kind() {
ty::Adt(def, _) => Some(def.did()),
ty::Foreign(def_id) => Some(*def_id),
@@ -700,7 +667,7 @@ impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations {
// and recommending Copy might be a bad idea.
for field in def.all_fields() {
let did = field.did;
- if cx.tcx.type_of(did).subst_identity().is_unsafe_ptr() {
+ if cx.tcx.type_of(did).instantiate_identity().is_unsafe_ptr() {
return;
}
}
@@ -708,6 +675,9 @@ impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations {
if ty.is_copy_modulo_regions(cx.tcx, param_env) {
return;
}
+ if type_implements_negative_copy_modulo_regions(cx.tcx, ty, param_env) {
+ return;
+ }
// We shouldn't recommend implementing `Copy` on stateful things,
// such as iterators.
@@ -743,6 +713,24 @@ impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations {
}
}
+/// Check whether a `ty` has a negative `Copy` implementation, ignoring outlives constraints.
+fn type_implements_negative_copy_modulo_regions<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+) -> bool {
+ let trait_ref = ty::TraitRef::new(tcx, tcx.require_lang_item(hir::LangItem::Copy, None), [ty]);
+ let pred = ty::TraitPredicate { trait_ref, polarity: ty::ImplPolarity::Negative };
+ let obligation = traits::Obligation {
+ cause: traits::ObligationCause::dummy(),
+ param_env,
+ recursion_depth: 0,
+ predicate: ty::Binder::dummy(pred).to_predicate(tcx),
+ };
+
+ tcx.infer_ctxt().build().predicate_must_hold_modulo_regions(&obligation)
+}
+
declare_lint! {
/// The `missing_debug_implementations` lint detects missing
/// implementations of [`fmt::Debug`] for public types.
@@ -776,9 +764,7 @@ declare_lint! {
}
#[derive(Default)]
-pub struct MissingDebugImplementations {
- impling_types: Option<LocalDefIdSet>,
-}
+pub(crate) struct MissingDebugImplementations;
impl_lint_pass!(MissingDebugImplementations => [MISSING_DEBUG_IMPLEMENTATIONS]);
@@ -793,25 +779,20 @@ impl<'tcx> LateLintPass<'tcx> for MissingDebugImplementations {
_ => return,
}
- let Some(debug) = cx.tcx.get_diagnostic_item(sym::Debug) else {
- return
- };
-
- if self.impling_types.is_none() {
- let mut impls = LocalDefIdSet::default();
- cx.tcx.for_each_impl(debug, |d| {
- if let Some(ty_def) = cx.tcx.type_of(d).subst_identity().ty_adt_def() {
- if let Some(def_id) = ty_def.did().as_local() {
- impls.insert(def_id);
- }
- }
- });
-
- self.impling_types = Some(impls);
- debug!("{:?}", self.impling_types);
+ // Avoid listing trait impls if the trait is allowed.
+ let (level, _) = cx.tcx.lint_level_at_node(MISSING_DEBUG_IMPLEMENTATIONS, item.hir_id());
+ if level == Level::Allow {
+ return;
}
- if !self.impling_types.as_ref().unwrap().contains(&item.owner_id.def_id) {
+ let Some(debug) = cx.tcx.get_diagnostic_item(sym::Debug) else { return };
+
+ let has_impl = cx
+ .tcx
+ .non_blanket_impls_for_ty(debug, cx.tcx.type_of(item.owner_id).instantiate_identity())
+ .next()
+ .is_some();
+ if !has_impl {
cx.emit_spanned_lint(
MISSING_DEBUG_IMPLEMENTATIONS,
item.span,
@@ -1259,8 +1240,8 @@ impl<'tcx> LateLintPass<'tcx> for UnstableFeatures {
declare_lint! {
/// The `ungated_async_fn_track_caller` lint warns when the
- /// `#[track_caller]` attribute is used on an async function, method, or
- /// closure, without enabling the corresponding unstable feature flag.
+ /// `#[track_caller]` attribute is used on an async function
+ /// without enabling the corresponding unstable feature flag.
///
/// ### Example
///
@@ -1274,13 +1255,13 @@ declare_lint! {
/// ### Explanation
///
/// The attribute must be used in conjunction with the
- /// [`closure_track_caller` feature flag]. Otherwise, the `#[track_caller]`
+ /// [`async_fn_track_caller` feature flag]. Otherwise, the `#[track_caller]`
/// annotation will function as a no-op.
///
- /// [`closure_track_caller` feature flag]: https://doc.rust-lang.org/beta/unstable-book/language-features/closure-track-caller.html
+ /// [`async_fn_track_caller` feature flag]: https://doc.rust-lang.org/beta/unstable-book/language-features/async-fn-track-caller.html
UNGATED_ASYNC_FN_TRACK_CALLER,
Warn,
- "enabling track_caller on an async fn is a no-op unless the closure_track_caller feature is enabled"
+ "enabling track_caller on an async fn is a no-op unless the async_fn_track_caller feature is enabled"
}
declare_lint_pass!(
@@ -1300,7 +1281,7 @@ impl<'tcx> LateLintPass<'tcx> for UngatedAsyncFnTrackCaller {
def_id: LocalDefId,
) {
if fn_kind.asyncness() == IsAsync::Async
- && !cx.tcx.features().closure_track_caller
+ && !cx.tcx.features().async_fn_track_caller
// Now, check if the function has the `#[track_caller]` attribute
&& let Some(attr) = cx.tcx.get_attr(def_id, sym::track_caller)
{
@@ -1458,17 +1439,20 @@ impl TypeAliasBounds {
impl<'tcx> LateLintPass<'tcx> for TypeAliasBounds {
fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
- let hir::ItemKind::TyAlias(ty, type_alias_generics) = &item.kind else {
- return
- };
- if cx.tcx.type_of(item.owner_id.def_id).skip_binder().has_opaque_types() {
- // Bounds are respected for `type X = impl Trait` and `type X = (impl Trait, Y);`
+ let hir::ItemKind::TyAlias(hir_ty, type_alias_generics) = &item.kind else { return };
+
+ if cx.tcx.features().lazy_type_alias {
+ // Bounds of lazy type aliases are respected.
return;
}
- if cx.tcx.type_of(item.owner_id).skip_binder().has_inherent_projections() {
- // Bounds are respected for `type X = … Type::Inherent …`
+
+ let ty = cx.tcx.type_of(item.owner_id).skip_binder();
+ if ty.has_opaque_types() || ty.has_inherent_projections() {
+ // Bounds of type aliases that contain opaque types or inherent projections are respected.
+ // E.g.: `type X = impl Trait;`, `type X = (impl Trait, Y);`, `type X = Type::Inherent;`.
return;
}
+
// There must not be a where clause
if type_alias_generics.predicates.is_empty() {
return;
@@ -1493,7 +1477,7 @@ impl<'tcx> LateLintPass<'tcx> for TypeAliasBounds {
if !where_spans.is_empty() {
let sub = (!suggested_changing_assoc_types).then(|| {
suggested_changing_assoc_types = true;
- SuggestChangingAssocTypes { ty }
+ SuggestChangingAssocTypes { ty: hir_ty }
});
cx.emit_spanned_lint(
TYPE_ALIAS_BOUNDS,
@@ -1509,7 +1493,7 @@ impl<'tcx> LateLintPass<'tcx> for TypeAliasBounds {
let suggestion = BuiltinTypeAliasGenericBoundsSuggestion { suggestions: inline_sugg };
let sub = (!suggested_changing_assoc_types).then(|| {
suggested_changing_assoc_types = true;
- SuggestChangingAssocTypes { ty }
+ SuggestChangingAssocTypes { ty: hir_ty }
});
cx.emit_spanned_lint(
TYPE_ALIAS_BOUNDS,
@@ -1531,9 +1515,10 @@ declare_lint_pass!(
impl<'tcx> LateLintPass<'tcx> for UnusedBrokenConst {
fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
match it.kind {
- hir::ItemKind::Const(_, body_id) => {
+ hir::ItemKind::Const(_, _, body_id) => {
let def_id = cx.tcx.hir().body_owner_def_id(body_id).to_def_id();
// trigger the query once for all constants since that will already report the errors
+ // FIXME(generic_const_items): Does this work properly with generic const items?
cx.tcx.ensure().const_eval_poly(def_id);
}
hir::ItemKind::Static(_, _, body_id) => {
@@ -1718,7 +1703,7 @@ impl EarlyLintPass for EllipsisInclusiveRangePatterns {
let end = expr_to_string(&end);
let replace = match start {
Some(start) => format!("&({}..={})", expr_to_string(&start), end),
- None => format!("&(..={})", end),
+ None => format!("&(..={end})"),
};
if join.edition() >= Edition::Edition2021 {
cx.sess().emit_err(BuiltinEllipsisInclusiveRangePatterns {
@@ -1767,82 +1752,6 @@ impl EarlyLintPass for EllipsisInclusiveRangePatterns {
}
declare_lint! {
- /// The `unnameable_test_items` lint detects [`#[test]`][test] functions
- /// that are not able to be run by the test harness because they are in a
- /// position where they are not nameable.
- ///
- /// [test]: https://doc.rust-lang.org/reference/attributes/testing.html#the-test-attribute
- ///
- /// ### Example
- ///
- /// ```rust,test
- /// fn main() {
- /// #[test]
- /// fn foo() {
- /// // This test will not fail because it does not run.
- /// assert_eq!(1, 2);
- /// }
- /// }
- /// ```
- ///
- /// {{produces}}
- ///
- /// ### Explanation
- ///
- /// In order for the test harness to run a test, the test function must be
- /// located in a position where it can be accessed from the crate root.
- /// This generally means it must be defined in a module, and not anywhere
- /// else such as inside another function. The compiler previously allowed
- /// this without an error, so a lint was added as an alert that a test is
- /// not being used. Whether or not this should be allowed has not yet been
- /// decided, see [RFC 2471] and [issue #36629].
- ///
- /// [RFC 2471]: https://github.com/rust-lang/rfcs/pull/2471#issuecomment-397414443
- /// [issue #36629]: https://github.com/rust-lang/rust/issues/36629
- UNNAMEABLE_TEST_ITEMS,
- Warn,
- "detects an item that cannot be named being marked as `#[test_case]`",
- report_in_external_macro
-}
-
-pub struct UnnameableTestItems {
- boundary: Option<hir::OwnerId>, // Id of the item under which things are not nameable
- items_nameable: bool,
-}
-
-impl_lint_pass!(UnnameableTestItems => [UNNAMEABLE_TEST_ITEMS]);
-
-impl UnnameableTestItems {
- pub fn new() -> Self {
- Self { boundary: None, items_nameable: true }
- }
-}
-
-impl<'tcx> LateLintPass<'tcx> for UnnameableTestItems {
- fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
- if self.items_nameable {
- if let hir::ItemKind::Mod(..) = it.kind {
- } else {
- self.items_nameable = false;
- self.boundary = Some(it.owner_id);
- }
- return;
- }
-
- let attrs = cx.tcx.hir().attrs(it.hir_id());
- if let Some(attr) = attr::find_by_name(attrs, sym::rustc_test_marker) {
- cx.emit_spanned_lint(UNNAMEABLE_TEST_ITEMS, attr.span, BuiltinUnnameableTestItems);
- }
- }
-
- fn check_item_post(&mut self, _cx: &LateContext<'_>, it: &hir::Item<'_>) {
- if !self.items_nameable && self.boundary == Some(it.owner_id) {
- self.items_nameable = true;
- }
- }
-}
-
-declare_lint! {
/// The `keyword_idents` lint detects edition keywords being used as an
/// identifier.
///
@@ -2147,8 +2056,8 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements {
match predicate.bounded_ty.kind {
hir::TyKind::Path(hir::QPath::Resolved(None, path)) => {
let Res::Def(DefKind::TyParam, def_id) = path.res else {
- continue;
- };
+ continue;
+ };
let index = ty_generics.param_def_id_to_index[&def_id];
(
Self::lifetimes_outliving_type(inferred_outlives, index),
@@ -2297,30 +2206,63 @@ declare_lint! {
"incomplete features that may function improperly in some or all cases"
}
+declare_lint! {
+ /// The `internal_features` lint detects unstable features enabled with
+ /// the [`feature` attribute] that are internal to the compiler or standard
+ /// library.
+ ///
+ /// [`feature` attribute]: https://doc.rust-lang.org/nightly/unstable-book/
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(rustc_attrs)]
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// These features are an implementation detail of the compiler and standard
+ /// library and are not supposed to be used in user code.
+ pub INTERNAL_FEATURES,
+ Warn,
+ "internal features are not supposed to be used"
+}
+
declare_lint_pass!(
/// Check for used feature gates in `INCOMPLETE_FEATURES` in `rustc_feature/src/active.rs`.
- IncompleteFeatures => [INCOMPLETE_FEATURES]
+ IncompleteInternalFeatures => [INCOMPLETE_FEATURES, INTERNAL_FEATURES]
);
-impl EarlyLintPass for IncompleteFeatures {
+impl EarlyLintPass for IncompleteInternalFeatures {
fn check_crate(&mut self, cx: &EarlyContext<'_>, _: &ast::Crate) {
- let features = cx.sess().features_untracked();
+ let features = cx.builder.features();
features
.declared_lang_features
.iter()
.map(|(name, span, _)| (name, span))
.chain(features.declared_lib_features.iter().map(|(name, span)| (name, span)))
- .filter(|(&name, _)| features.incomplete(name))
+ .filter(|(&name, _)| features.incomplete(name) || features.internal(name))
.for_each(|(&name, &span)| {
let note = rustc_feature::find_feature_issue(name, GateIssue::Language)
- .map(|n| BuiltinIncompleteFeaturesNote { n });
- let help =
- HAS_MIN_FEATURES.contains(&name).then_some(BuiltinIncompleteFeaturesHelp);
- cx.emit_spanned_lint(
- INCOMPLETE_FEATURES,
- span,
- BuiltinIncompleteFeatures { name, note, help },
- );
+ .map(|n| BuiltinFeatureIssueNote { n });
+
+ if features.incomplete(name) {
+ let help =
+ HAS_MIN_FEATURES.contains(&name).then_some(BuiltinIncompleteFeaturesHelp);
+ cx.emit_spanned_lint(
+ INCOMPLETE_FEATURES,
+ span,
+ BuiltinIncompleteFeatures { name, note, help },
+ );
+ } else {
+ cx.emit_spanned_lint(
+ INTERNAL_FEATURES,
+ span,
+ BuiltinInternalFeatures { name, note },
+ );
+ }
});
}
}
@@ -2459,12 +2401,12 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
cx: &LateContext<'tcx>,
ty: Ty<'tcx>,
variant: &VariantDef,
- substs: ty::SubstsRef<'tcx>,
+ args: ty::GenericArgsRef<'tcx>,
descr: &str,
init: InitKind,
) -> Option<InitError> {
let mut field_err = variant.fields.iter().find_map(|field| {
- ty_find_init_error(cx, field.ty(cx.tcx, substs), init).map(|mut err| {
+ ty_find_init_error(cx, field.ty(cx.tcx, args), init).map(|mut err| {
if !field.did.is_local() {
err
} else if err.span.is_none() {
@@ -2541,14 +2483,14 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
Some("raw pointers must be initialized".into())
}
// Recurse and checks for some compound types. (but not unions)
- Adt(adt_def, substs) if !adt_def.is_union() => {
+ Adt(adt_def, args) if !adt_def.is_union() => {
// Handle structs.
if adt_def.is_struct() {
return variant_find_init_error(
cx,
ty,
adt_def.non_enum_variant(),
- substs,
+ args,
"struct field",
init,
);
@@ -2558,7 +2500,7 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
let mut potential_variants = adt_def.variants().iter().filter_map(|variant| {
let definitely_inhabited = match variant
.inhabited_predicate(cx.tcx, *adt_def)
- .subst(cx.tcx, substs)
+ .instantiate(cx.tcx, args)
.apply_any_module(cx.tcx, cx.param_env)
{
// Entirely skip uninhabited variants.
@@ -2570,7 +2512,10 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
Some((variant, definitely_inhabited))
});
let Some(first_variant) = potential_variants.next() else {
- return Some(InitError::from("enums with no inhabited variants have no valid value").spanned(span));
+ return Some(
+ InitError::from("enums with no inhabited variants have no valid value")
+ .spanned(span),
+ );
};
// So we have at least one potentially inhabited variant. Might we have two?
let Some(second_variant) = potential_variants.next() else {
@@ -2579,7 +2524,7 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
cx,
ty,
&first_variant.0,
- substs,
+ args,
"field of the only potentially inhabited enum variant",
init,
);
@@ -2649,381 +2594,6 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
}
declare_lint! {
- /// The `clashing_extern_declarations` lint detects when an `extern fn`
- /// has been declared with the same name but different types.
- ///
- /// ### Example
- ///
- /// ```rust
- /// mod m {
- /// extern "C" {
- /// fn foo();
- /// }
- /// }
- ///
- /// extern "C" {
- /// fn foo(_: u32);
- /// }
- /// ```
- ///
- /// {{produces}}
- ///
- /// ### Explanation
- ///
- /// Because two symbols of the same name cannot be resolved to two
- /// different functions at link time, and one function cannot possibly
- /// have two types, a clashing extern declaration is almost certainly a
- /// mistake. Check to make sure that the `extern` definitions are correct
- /// and equivalent, and possibly consider unifying them in one location.
- ///
- /// This lint does not run between crates because a project may have
- /// dependencies which both rely on the same extern function, but declare
- /// it in a different (but valid) way. For example, they may both declare
- /// an opaque type for one or more of the arguments (which would end up
- /// distinct types), or use types that are valid conversions in the
- /// language the `extern fn` is defined in. In these cases, the compiler
- /// can't say that the clashing declaration is incorrect.
- pub CLASHING_EXTERN_DECLARATIONS,
- Warn,
- "detects when an extern fn has been declared with the same name but different types"
-}
-
-pub struct ClashingExternDeclarations {
- /// Map of function symbol name to the first-seen hir id for that symbol name.. If seen_decls
- /// contains an entry for key K, it means a symbol with name K has been seen by this lint and
- /// the symbol should be reported as a clashing declaration.
- // FIXME: Technically, we could just store a &'tcx str here without issue; however, the
- // `impl_lint_pass` macro doesn't currently support lints parametric over a lifetime.
- seen_decls: FxHashMap<Symbol, hir::OwnerId>,
-}
-
-/// Differentiate between whether the name for an extern decl came from the link_name attribute or
-/// just from declaration itself. This is important because we don't want to report clashes on
-/// symbol name if they don't actually clash because one or the other links against a symbol with a
-/// different name.
-enum SymbolName {
- /// The name of the symbol + the span of the annotation which introduced the link name.
- Link(Symbol, Span),
- /// No link name, so just the name of the symbol.
- Normal(Symbol),
-}
-
-impl SymbolName {
- fn get_name(&self) -> Symbol {
- match self {
- SymbolName::Link(s, _) | SymbolName::Normal(s) => *s,
- }
- }
-}
-
-impl ClashingExternDeclarations {
- pub(crate) fn new() -> Self {
- ClashingExternDeclarations { seen_decls: FxHashMap::default() }
- }
-
- /// Insert a new foreign item into the seen set. If a symbol with the same name already exists
- /// for the item, return its HirId without updating the set.
- fn insert(&mut self, tcx: TyCtxt<'_>, fi: &hir::ForeignItem<'_>) -> Option<hir::OwnerId> {
- let did = fi.owner_id.to_def_id();
- let instance = Instance::new(did, ty::List::identity_for_item(tcx, did));
- let name = Symbol::intern(tcx.symbol_name(instance).name);
- if let Some(&existing_id) = self.seen_decls.get(&name) {
- // Avoid updating the map with the new entry when we do find a collision. We want to
- // make sure we're always pointing to the first definition as the previous declaration.
- // This lets us avoid emitting "knock-on" diagnostics.
- Some(existing_id)
- } else {
- self.seen_decls.insert(name, fi.owner_id)
- }
- }
-
- /// Get the name of the symbol that's linked against for a given extern declaration. That is,
- /// the name specified in a #[link_name = ...] attribute if one was specified, else, just the
- /// symbol's name.
- fn name_of_extern_decl(tcx: TyCtxt<'_>, fi: &hir::ForeignItem<'_>) -> SymbolName {
- if let Some((overridden_link_name, overridden_link_name_span)) =
- tcx.codegen_fn_attrs(fi.owner_id).link_name.map(|overridden_link_name| {
- // FIXME: Instead of searching through the attributes again to get span
- // information, we could have codegen_fn_attrs also give span information back for
- // where the attribute was defined. However, until this is found to be a
- // bottleneck, this does just fine.
- (overridden_link_name, tcx.get_attr(fi.owner_id, sym::link_name).unwrap().span)
- })
- {
- SymbolName::Link(overridden_link_name, overridden_link_name_span)
- } else {
- SymbolName::Normal(fi.ident.name)
- }
- }
-
- /// Checks whether two types are structurally the same enough that the declarations shouldn't
- /// clash. We need this so we don't emit a lint when two modules both declare an extern struct,
- /// with the same members (as the declarations shouldn't clash).
- fn structurally_same_type<'tcx>(
- cx: &LateContext<'tcx>,
- a: Ty<'tcx>,
- b: Ty<'tcx>,
- ckind: CItemKind,
- ) -> bool {
- fn structurally_same_type_impl<'tcx>(
- seen_types: &mut FxHashSet<(Ty<'tcx>, Ty<'tcx>)>,
- cx: &LateContext<'tcx>,
- a: Ty<'tcx>,
- b: Ty<'tcx>,
- ckind: CItemKind,
- ) -> bool {
- debug!("structurally_same_type_impl(cx, a = {:?}, b = {:?})", a, b);
- let tcx = cx.tcx;
-
- // Given a transparent newtype, reach through and grab the inner
- // type unless the newtype makes the type non-null.
- let non_transparent_ty = |mut ty: Ty<'tcx>| -> Ty<'tcx> {
- loop {
- if let ty::Adt(def, substs) = *ty.kind() {
- let is_transparent = def.repr().transparent();
- let is_non_null = crate::types::nonnull_optimization_guaranteed(tcx, def);
- debug!(
- "non_transparent_ty({:?}) -- type is transparent? {}, type is non-null? {}",
- ty, is_transparent, is_non_null
- );
- if is_transparent && !is_non_null {
- debug_assert_eq!(def.variants().len(), 1);
- let v = &def.variant(FIRST_VARIANT);
- // continue with `ty`'s non-ZST field,
- // otherwise `ty` is a ZST and we can return
- if let Some(field) = transparent_newtype_field(tcx, v) {
- ty = field.ty(tcx, substs);
- continue;
- }
- }
- }
- debug!("non_transparent_ty -> {:?}", ty);
- return ty;
- }
- };
-
- let a = non_transparent_ty(a);
- let b = non_transparent_ty(b);
-
- if !seen_types.insert((a, b)) {
- // We've encountered a cycle. There's no point going any further -- the types are
- // structurally the same.
- true
- } else if a == b {
- // All nominally-same types are structurally same, too.
- true
- } else {
- // Do a full, depth-first comparison between the two.
- use rustc_type_ir::sty::TyKind::*;
- let a_kind = a.kind();
- let b_kind = b.kind();
-
- let compare_layouts = |a, b| -> Result<bool, LayoutError<'tcx>> {
- debug!("compare_layouts({:?}, {:?})", a, b);
- let a_layout = &cx.layout_of(a)?.layout.abi();
- let b_layout = &cx.layout_of(b)?.layout.abi();
- debug!(
- "comparing layouts: {:?} == {:?} = {}",
- a_layout,
- b_layout,
- a_layout == b_layout
- );
- Ok(a_layout == b_layout)
- };
-
- #[allow(rustc::usage_of_ty_tykind)]
- let is_primitive_or_pointer = |kind: &ty::TyKind<'_>| {
- kind.is_primitive() || matches!(kind, RawPtr(..) | Ref(..))
- };
-
- ensure_sufficient_stack(|| {
- match (a_kind, b_kind) {
- (Adt(a_def, _), Adt(b_def, _)) => {
- // We can immediately rule out these types as structurally same if
- // their layouts differ.
- match compare_layouts(a, b) {
- Ok(false) => return false,
- _ => (), // otherwise, continue onto the full, fields comparison
- }
-
- // Grab a flattened representation of all fields.
- let a_fields = a_def.variants().iter().flat_map(|v| v.fields.iter());
- let b_fields = b_def.variants().iter().flat_map(|v| v.fields.iter());
-
- // Perform a structural comparison for each field.
- a_fields.eq_by(
- b_fields,
- |&ty::FieldDef { did: a_did, .. },
- &ty::FieldDef { did: b_did, .. }| {
- structurally_same_type_impl(
- seen_types,
- cx,
- tcx.type_of(a_did).subst_identity(),
- tcx.type_of(b_did).subst_identity(),
- ckind,
- )
- },
- )
- }
- (Array(a_ty, a_const), Array(b_ty, b_const)) => {
- // For arrays, we also check the constness of the type.
- a_const.kind() == b_const.kind()
- && structurally_same_type_impl(seen_types, cx, *a_ty, *b_ty, ckind)
- }
- (Slice(a_ty), Slice(b_ty)) => {
- structurally_same_type_impl(seen_types, cx, *a_ty, *b_ty, ckind)
- }
- (RawPtr(a_tymut), RawPtr(b_tymut)) => {
- a_tymut.mutbl == b_tymut.mutbl
- && structurally_same_type_impl(
- seen_types, cx, a_tymut.ty, b_tymut.ty, ckind,
- )
- }
- (Ref(_a_region, a_ty, a_mut), Ref(_b_region, b_ty, b_mut)) => {
- // For structural sameness, we don't need the region to be same.
- a_mut == b_mut
- && structurally_same_type_impl(seen_types, cx, *a_ty, *b_ty, ckind)
- }
- (FnDef(..), FnDef(..)) => {
- let a_poly_sig = a.fn_sig(tcx);
- let b_poly_sig = b.fn_sig(tcx);
-
- // We don't compare regions, but leaving bound regions around ICEs, so
- // we erase them.
- let a_sig = tcx.erase_late_bound_regions(a_poly_sig);
- let b_sig = tcx.erase_late_bound_regions(b_poly_sig);
-
- (a_sig.abi, a_sig.unsafety, a_sig.c_variadic)
- == (b_sig.abi, b_sig.unsafety, b_sig.c_variadic)
- && a_sig.inputs().iter().eq_by(b_sig.inputs().iter(), |a, b| {
- structurally_same_type_impl(seen_types, cx, *a, *b, ckind)
- })
- && structurally_same_type_impl(
- seen_types,
- cx,
- a_sig.output(),
- b_sig.output(),
- ckind,
- )
- }
- (Tuple(a_substs), Tuple(b_substs)) => {
- a_substs.iter().eq_by(b_substs.iter(), |a_ty, b_ty| {
- structurally_same_type_impl(seen_types, cx, a_ty, b_ty, ckind)
- })
- }
- // For these, it's not quite as easy to define structural-sameness quite so easily.
- // For the purposes of this lint, take the conservative approach and mark them as
- // not structurally same.
- (Dynamic(..), Dynamic(..))
- | (Error(..), Error(..))
- | (Closure(..), Closure(..))
- | (Generator(..), Generator(..))
- | (GeneratorWitness(..), GeneratorWitness(..))
- | (Alias(ty::Projection, ..), Alias(ty::Projection, ..))
- | (Alias(ty::Inherent, ..), Alias(ty::Inherent, ..))
- | (Alias(ty::Opaque, ..), Alias(ty::Opaque, ..)) => false,
-
- // These definitely should have been caught above.
- (Bool, Bool) | (Char, Char) | (Never, Never) | (Str, Str) => unreachable!(),
-
- // An Adt and a primitive or pointer type. This can be FFI-safe if non-null
- // enum layout optimisation is being applied.
- (Adt(..), other_kind) | (other_kind, Adt(..))
- if is_primitive_or_pointer(other_kind) =>
- {
- let (primitive, adt) =
- if is_primitive_or_pointer(a.kind()) { (a, b) } else { (b, a) };
- if let Some(ty) = crate::types::repr_nullable_ptr(cx, adt, ckind) {
- ty == primitive
- } else {
- compare_layouts(a, b).unwrap_or(false)
- }
- }
- // Otherwise, just compare the layouts. This may fail to lint for some
- // incompatible types, but at the very least, will stop reads into
- // uninitialised memory.
- _ => compare_layouts(a, b).unwrap_or(false),
- }
- })
- }
- }
- let mut seen_types = FxHashSet::default();
- structurally_same_type_impl(&mut seen_types, cx, a, b, ckind)
- }
-}
-
-impl_lint_pass!(ClashingExternDeclarations => [CLASHING_EXTERN_DECLARATIONS]);
-
-impl<'tcx> LateLintPass<'tcx> for ClashingExternDeclarations {
- #[instrument(level = "trace", skip(self, cx))]
- fn check_foreign_item(&mut self, cx: &LateContext<'tcx>, this_fi: &hir::ForeignItem<'_>) {
- if let ForeignItemKind::Fn(..) = this_fi.kind {
- let tcx = cx.tcx;
- if let Some(existing_did) = self.insert(tcx, this_fi) {
- let existing_decl_ty = tcx.type_of(existing_did).skip_binder();
- let this_decl_ty = tcx.type_of(this_fi.owner_id).subst_identity();
- debug!(
- "ClashingExternDeclarations: Comparing existing {:?}: {:?} to this {:?}: {:?}",
- existing_did, existing_decl_ty, this_fi.owner_id, this_decl_ty
- );
- // Check that the declarations match.
- if !Self::structurally_same_type(
- cx,
- existing_decl_ty,
- this_decl_ty,
- CItemKind::Declaration,
- ) {
- let orig_fi = tcx.hir().expect_foreign_item(existing_did);
- let orig = Self::name_of_extern_decl(tcx, orig_fi);
-
- // We want to ensure that we use spans for both decls that include where the
- // name was defined, whether that was from the link_name attribute or not.
- let get_relevant_span =
- |fi: &hir::ForeignItem<'_>| match Self::name_of_extern_decl(tcx, fi) {
- SymbolName::Normal(_) => fi.span,
- SymbolName::Link(_, annot_span) => fi.span.to(annot_span),
- };
-
- // Finally, emit the diagnostic.
- let this = this_fi.ident.name;
- let orig = orig.get_name();
- let previous_decl_label = get_relevant_span(orig_fi);
- let mismatch_label = get_relevant_span(this_fi);
- let sub = BuiltinClashingExternSub {
- tcx,
- expected: existing_decl_ty,
- found: this_decl_ty,
- };
- let decorator = if orig == this {
- BuiltinClashingExtern::SameName {
- this,
- orig,
- previous_decl_label,
- mismatch_label,
- sub,
- }
- } else {
- BuiltinClashingExtern::DiffName {
- this,
- orig,
- previous_decl_label,
- mismatch_label,
- sub,
- }
- };
- tcx.emit_spanned_lint(
- CLASHING_EXTERN_DECLARATIONS,
- this_fi.hir_id(),
- get_relevant_span(this_fi),
- decorator,
- );
- }
- }
- }
- }
-}
-
-declare_lint! {
/// The `deref_nullptr` lint detects when an null pointer is dereferenced,
/// which causes [undefined behavior].
///
@@ -3181,7 +2751,7 @@ impl<'tcx> LateLintPass<'tcx> for NamedAsmLabels {
let mut chars = possible_label.chars();
let Some(c) = chars.next() else {
// Empty string means a leading ':' in this section, which is not a label
- break
+ break;
};
// A label starts with an alphabetic character or . or _ and continues with alphanumeric characters, _, or $
if (c.is_alphabetic() || matches!(c, '.' | '_'))
diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs
index 3761754f3..f73797415 100644
--- a/compiler/rustc_lint/src/context.rs
+++ b/compiler/rustc_lint/src/context.rs
@@ -27,6 +27,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync;
use rustc_errors::{add_elided_lifetime_in_path_suggestion, DiagnosticBuilder, DiagnosticMessage};
use rustc_errors::{Applicability, DecorateLint, MultiSpan, SuggestionStyle};
+use rustc_feature::Features;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::def_id::{CrateNum, DefId};
@@ -35,7 +36,7 @@ use rustc_middle::middle::privacy::EffectiveVisibilities;
use rustc_middle::middle::stability;
use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_middle::ty::{self, print::Printer, subst::GenericArg, RegisteredTools, Ty, TyCtxt};
+use rustc_middle::ty::{self, print::Printer, GenericArg, RegisteredTools, Ty, TyCtxt};
use rustc_session::config::ExpectedValues;
use rustc_session::lint::{BuiltinLintDiagnostics, LintExpectationId};
use rustc_session::lint::{FutureIncompatibleInfo, Level, Lint, LintBuffer, LintId};
@@ -411,7 +412,7 @@ impl LintStore {
}
let complete_name = if let Some(tool_name) = tool_name {
- format!("{}::{}", tool_name, lint_name)
+ format!("{tool_name}::{lint_name}")
} else {
lint_name.to_string()
};
@@ -424,7 +425,7 @@ impl LintStore {
// 1. The tool is currently running, so this lint really doesn't exist.
// FIXME: should this handle tools that never register a lint, like rustfmt?
debug!("lints={:?}", self.by_name.keys().collect::<Vec<_>>());
- let tool_prefix = format!("{}::", tool_name);
+ let tool_prefix = format!("{tool_name}::");
return if self.by_name.keys().any(|lint| lint.starts_with(&tool_prefix)) {
self.no_lint_suggestion(&complete_name)
} else {
@@ -445,11 +446,11 @@ impl LintStore {
}
match self.by_name.get(&complete_name) {
Some(Renamed(new_name, _)) => CheckLintNameResult::Warning(
- format!("lint `{}` has been renamed to `{}`", complete_name, new_name),
+ format!("lint `{complete_name}` has been renamed to `{new_name}`"),
Some(new_name.to_owned()),
),
Some(Removed(reason)) => CheckLintNameResult::Warning(
- format!("lint `{}` has been removed: {}", complete_name, reason),
+ format!("lint `{complete_name}` has been removed: {reason}"),
None,
),
None => match self.lint_groups.get(&*complete_name) {
@@ -503,7 +504,7 @@ impl LintStore {
lint_name: &str,
tool_name: &str,
) -> CheckLintNameResult<'_> {
- let complete_name = format!("{}::{}", tool_name, lint_name);
+ let complete_name = format!("{tool_name}::{lint_name}");
match self.by_name.get(&complete_name) {
None => match self.lint_groups.get(&*complete_name) {
// Now we are sure, that this lint exists nowhere
@@ -618,12 +619,10 @@ pub trait LintContext: Sized {
_ => ("", "s"),
};
db.span_label(span, format!(
- "this comment contains {}invisible unicode text flow control codepoint{}",
- an,
- s,
+ "this comment contains {an}invisible unicode text flow control codepoint{s}",
));
for (c, span) in &spans {
- db.span_label(*span, format!("{:?}", c));
+ db.span_label(*span, format!("{c:?}"));
}
db.note(
"these kind of unicode codepoints change the way text flows on \
@@ -648,7 +647,7 @@ pub trait LintContext: Sized {
let opt_colon =
if s.trim_start().starts_with("::") { "" } else { "::" };
- (format!("crate{}{}", opt_colon, s), Applicability::MachineApplicable)
+ (format!("crate{opt_colon}{s}"), Applicability::MachineApplicable)
}
Err(_) => ("crate::<path>".to_string(), Applicability::HasPlaceholders),
};
@@ -704,7 +703,7 @@ pub trait LintContext: Sized {
let introduced = if is_imported { "imported" } else { "defined" };
db.span_label(
span,
- format!("the item `{}` is already {} here", ident, introduced),
+ format!("the item `{ident}` is already {introduced} here"),
);
}
}
@@ -908,7 +907,7 @@ pub trait LintContext: Sized {
BuiltinLintDiagnostics::NamedArgumentUsedPositionally{ position_sp_to_replace, position_sp_for_msg, named_arg_sp, named_arg_name, is_formatting_arg} => {
db.span_label(named_arg_sp, "this named argument is referred to by position in formatting string");
if let Some(positional_arg_for_msg) = position_sp_for_msg {
- let msg = format!("this formatting argument uses named argument `{}` by position", named_arg_name);
+ let msg = format!("this formatting argument uses named argument `{named_arg_name}` by position");
db.span_label(positional_arg_for_msg, msg);
}
@@ -948,14 +947,25 @@ pub trait LintContext: Sized {
Applicability::MachineApplicable,
);
}
+ BuiltinLintDiagnostics::AmbiguousGlobImports { diag } => {
+ rustc_errors::report_ambiguity_error(db, diag);
+ }
BuiltinLintDiagnostics::AmbiguousGlobReexports { name, namespace, first_reexport_span, duplicate_reexport_span } => {
- db.span_label(first_reexport_span, format!("the name `{}` in the {} namespace is first re-exported here", name, namespace));
- db.span_label(duplicate_reexport_span, format!("but the name `{}` in the {} namespace is also re-exported here", name, namespace));
+ db.span_label(first_reexport_span, format!("the name `{name}` in the {namespace} namespace is first re-exported here"));
+ db.span_label(duplicate_reexport_span, format!("but the name `{name}` in the {namespace} namespace is also re-exported here"));
}
BuiltinLintDiagnostics::HiddenGlobReexports { name, namespace, glob_reexport_span, private_item_span } => {
- db.span_note(glob_reexport_span, format!("the name `{}` in the {} namespace is supposed to be publicly re-exported here", name, namespace));
+ db.span_note(glob_reexport_span, format!("the name `{name}` in the {namespace} namespace is supposed to be publicly re-exported here"));
db.span_note(private_item_span, "but the private item here shadows it".to_owned());
}
+ BuiltinLintDiagnostics::UnusedQualifications { removal_span } => {
+ db.span_suggestion_verbose(
+ removal_span,
+ "remove the unnecessary path segments",
+ "",
+ Applicability::MachineApplicable
+ );
+ }
}
// Rewrap `db`, and pass control to the user.
decorate(db)
@@ -1062,6 +1072,7 @@ pub trait LintContext: Sized {
impl<'a> EarlyContext<'a> {
pub(crate) fn new(
sess: &'a Session,
+ features: &'a Features,
warn_about_weird_lints: bool,
lint_store: &'a LintStore,
registered_tools: &'a RegisteredTools,
@@ -1070,6 +1081,7 @@ impl<'a> EarlyContext<'a> {
EarlyContext {
builder: LintLevelsBuilder::new(
sess,
+ features,
warn_about_weird_lints,
lint_store,
registered_tools,
@@ -1265,16 +1277,16 @@ impl<'tcx> LateContext<'tcx> {
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
if trait_ref.is_none() {
- if let ty::Adt(def, substs) = self_ty.kind() {
- return self.print_def_path(def.did(), substs);
+ if let ty::Adt(def, args) = self_ty.kind() {
+ return self.print_def_path(def.did(), args);
}
}
// This shouldn't ever be needed, but just in case:
with_no_trimmed_paths!({
Ok(vec![match trait_ref {
- Some(trait_ref) => Symbol::intern(&format!("{:?}", trait_ref)),
- None => Symbol::intern(&format!("<{}>", self_ty)),
+ Some(trait_ref) => Symbol::intern(&format!("{trait_ref:?}")),
+ None => Symbol::intern(&format!("<{self_ty}>")),
}])
})
}
@@ -1298,7 +1310,7 @@ impl<'tcx> LateContext<'tcx> {
)))
}
None => {
- with_no_trimmed_paths!(Symbol::intern(&format!("<impl {}>", self_ty)))
+ with_no_trimmed_paths!(Symbol::intern(&format!("<impl {self_ty}>")))
}
});
diff --git a/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs b/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs
index ccf95992a..851c6493d 100644
--- a/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs
+++ b/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs
@@ -59,7 +59,7 @@ impl<'tcx> LateLintPass<'tcx> for DerefIntoDynSupertrait {
// `Deref` is being implemented for `t`
if let hir::ItemKind::Impl(impl_) = item.kind
&& let Some(trait_) = &impl_.of_trait
- && let t = cx.tcx.type_of(item.owner_id).subst_identity()
+ && let t = cx.tcx.type_of(item.owner_id).instantiate_identity()
&& let opt_did @ Some(did) = trait_.trait_def_id()
&& opt_did == cx.tcx.lang_items().deref_trait()
// `t` is `dyn t_principal`
diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs
index 9f1f5a26e..211ea8f43 100644
--- a/compiler/rustc_lint/src/early.rs
+++ b/compiler/rustc_lint/src/early.rs
@@ -20,6 +20,7 @@ use rustc_ast::ptr::P;
use rustc_ast::visit::{self as ast_visit, Visitor};
use rustc_ast::{self as ast, walk_list, HasAttrs};
use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_feature::Features;
use rustc_middle::ty::RegisteredTools;
use rustc_session::lint::{BufferedEarlyLint, LintBuffer, LintPass};
use rustc_session::Session;
@@ -381,6 +382,7 @@ impl<'a> EarlyCheckNode<'a> for (ast::NodeId, &'a [ast::Attribute], &'a [P<ast::
pub fn check_ast_node<'a>(
sess: &Session,
+ features: &Features,
pre_expansion: bool,
lint_store: &LintStore,
registered_tools: &RegisteredTools,
@@ -390,6 +392,7 @@ pub fn check_ast_node<'a>(
) {
let context = EarlyContext::new(
sess,
+ features,
!pre_expansion,
lint_store,
registered_tools,
diff --git a/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs b/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs
index 2ce28f3a0..05fe64830 100644
--- a/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs
+++ b/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs
@@ -51,7 +51,7 @@ fn enforce_mem_discriminant(
expr_span: Span,
args_span: Span,
) {
- let ty_param = cx.typeck_results().node_substs(func_expr.hir_id).type_at(0);
+ let ty_param = cx.typeck_results().node_args(func_expr.hir_id).type_at(0);
if is_non_enum(ty_param) {
cx.emit_spanned_lint(
ENUM_INTRINSICS_NON_ENUMS,
@@ -62,7 +62,7 @@ fn enforce_mem_discriminant(
}
fn enforce_mem_variant_count(cx: &LateContext<'_>, func_expr: &hir::Expr<'_>, span: Span) {
- let ty_param = cx.typeck_results().node_substs(func_expr.hir_id).type_at(0);
+ let ty_param = cx.typeck_results().node_args(func_expr.hir_id).type_at(0);
if is_non_enum(ty_param) {
cx.emit_spanned_lint(
ENUM_INTRINSICS_NON_ENUMS,
diff --git a/compiler/rustc_lint/src/for_loops_over_fallibles.rs b/compiler/rustc_lint/src/for_loops_over_fallibles.rs
index 7b58bf03b..c299e3884 100644
--- a/compiler/rustc_lint/src/for_loops_over_fallibles.rs
+++ b/compiler/rustc_lint/src/for_loops_over_fallibles.rs
@@ -51,7 +51,7 @@ impl<'tcx> LateLintPass<'tcx> for ForLoopsOverFallibles {
let ty = cx.typeck_results().expr_ty(arg);
- let &ty::Adt(adt, substs) = ty.kind() else { return };
+ let &ty::Adt(adt, args) = ty.kind() else { return };
let (article, ty, var) = match adt.did() {
did if cx.tcx.is_diagnostic_item(sym::Option, did) => ("an", "Option", "Some"),
@@ -66,7 +66,7 @@ impl<'tcx> LateLintPass<'tcx> for ForLoopsOverFallibles {
} else {
ForLoopsOverFalliblesLoopSub::UseWhileLet { start_span: expr.span.with_hi(pat.span.lo()), end_span: pat.span.between(arg.span), var }
} ;
- let question_mark = suggest_question_mark(cx, adt, substs, expr.span)
+ let question_mark = suggest_question_mark(cx, adt, args, expr.span)
.then(|| ForLoopsOverFalliblesQuestionMark { suggestion: arg.span.shrink_to_hi() });
let suggestion = ForLoopsOverFalliblesSuggestion {
var,
@@ -115,11 +115,13 @@ fn extract_iterator_next_call<'tcx>(
fn suggest_question_mark<'tcx>(
cx: &LateContext<'tcx>,
adt: ty::AdtDef<'tcx>,
- substs: &List<ty::GenericArg<'tcx>>,
+ args: &List<ty::GenericArg<'tcx>>,
span: Span,
) -> bool {
let Some(body_id) = cx.enclosing_body else { return false };
- let Some(into_iterator_did) = cx.tcx.get_diagnostic_item(sym::IntoIterator) else { return false };
+ let Some(into_iterator_did) = cx.tcx.get_diagnostic_item(sym::IntoIterator) else {
+ return false;
+ };
if !cx.tcx.is_diagnostic_item(sym::Result, adt.did()) {
return false;
@@ -135,7 +137,7 @@ fn suggest_question_mark<'tcx>(
}
}
- let ty = substs.type_at(0);
+ let ty = args.type_at(0);
let infcx = cx.tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new(&infcx);
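
(Aside, not part of the diff: a rough illustration of what `suggest_question_mark` decides; `read_lines` and `process` are invented names. The first loop is the pattern the lint reports, and the `?` form is the suggestion offered when the enclosing body returns a compatible `Result`, which is the obligation the renamed helper checks.)

```rust
fn read_lines() -> Result<Vec<String>, std::io::Error> {
    Ok(vec!["a".to_string(), "b".to_string()])
}

fn process() -> Result<(), std::io::Error> {
    // Lint target: iterating a `Result` directly visits at most one item
    // (the `Ok` value), which is rarely what was meant.
    for lines in read_lines() {
        println!("{}", lines.len());
    }
    // Suggested rewrite: `?` applies because `process` returns a compatible
    // `Result`, which is what `suggest_question_mark` verifies.
    for line in read_lines()? {
        println!("{}", line.len());
    }
    Ok(())
}

fn main() {
    let _ = process();
}
```
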
diff --git a/compiler/rustc_lint/src/foreign_modules.rs b/compiler/rustc_lint/src/foreign_modules.rs
new file mode 100644
index 000000000..7b291d558
--- /dev/null
+++ b/compiler/rustc_lint/src/foreign_modules.rs
@@ -0,0 +1,402 @@
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_middle::query::Providers;
+use rustc_middle::ty::layout::LayoutError;
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_session::lint::{lint_array, LintArray};
+use rustc_span::{sym, Span, Symbol};
+use rustc_target::abi::FIRST_VARIANT;
+
+use crate::lints::{BuiltinClashingExtern, BuiltinClashingExternSub};
+use crate::types;
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { clashing_extern_declarations, ..*providers };
+}
+
+pub(crate) fn get_lints() -> LintArray {
+ lint_array!(CLASHING_EXTERN_DECLARATIONS)
+}
+
+fn clashing_extern_declarations(tcx: TyCtxt<'_>, (): ()) {
+ let mut lint = ClashingExternDeclarations::new();
+ for id in tcx.hir_crate_items(()).foreign_items() {
+ lint.check_foreign_item(tcx, id);
+ }
+}
+
+declare_lint! {
+ /// The `clashing_extern_declarations` lint detects when an `extern fn`
+ /// has been declared with the same name but different types.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// mod m {
+ /// extern "C" {
+ /// fn foo();
+ /// }
+ /// }
+ ///
+ /// extern "C" {
+ /// fn foo(_: u32);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Because two symbols of the same name cannot be resolved to two
+ /// different functions at link time, and one function cannot possibly
+ /// have two types, a clashing extern declaration is almost certainly a
+ /// mistake. Check to make sure that the `extern` definitions are correct
+ /// and equivalent, and possibly consider unifying them in one location.
+ ///
+ /// This lint does not run between crates because a project may have
+ /// dependencies which both rely on the same extern function, but declare
+ /// it in a different (but valid) way. For example, they may both declare
+ /// an opaque type for one or more of the arguments (which would end up
+ /// distinct types), or use types that are valid conversions in the
+ /// language the `extern fn` is defined in. In these cases, the compiler
+ /// can't say that the clashing declaration is incorrect.
+ pub CLASHING_EXTERN_DECLARATIONS,
+ Warn,
+ "detects when an extern fn has been declared with the same name but different types"
+}
+
+struct ClashingExternDeclarations {
+ /// Map of function symbol name to the first-seen hir id for that symbol name. If seen_decls
+ /// contains an entry for key K, it means a symbol with name K has been seen by this lint and
+ /// the symbol should be reported as a clashing declaration.
+ // FIXME: Technically, we could just store a &'tcx str here without issue; however, the
+ // `impl_lint_pass` macro doesn't currently support lints parametric over a lifetime.
+ seen_decls: FxHashMap<Symbol, hir::OwnerId>,
+}
+
+/// Differentiate between whether the name for an extern decl came from the link_name attribute or
+/// just from the declaration itself. This is important because we don't want to report clashes on
+/// symbol name if they don't actually clash because one or the other links against a symbol with a
+/// different name.
+enum SymbolName {
+ /// The name of the symbol + the span of the annotation which introduced the link name.
+ Link(Symbol, Span),
+ /// No link name, so just the name of the symbol.
+ Normal(Symbol),
+}
+
+impl SymbolName {
+ fn get_name(&self) -> Symbol {
+ match self {
+ SymbolName::Link(s, _) | SymbolName::Normal(s) => *s,
+ }
+ }
+}
+
+impl ClashingExternDeclarations {
+ pub(crate) fn new() -> Self {
+ ClashingExternDeclarations { seen_decls: FxHashMap::default() }
+ }
+
+ /// Insert a new foreign item into the seen set. If a symbol with the same name already exists
+ /// for the item, return its OwnerId without updating the set.
+ fn insert(&mut self, tcx: TyCtxt<'_>, fi: hir::ForeignItemId) -> Option<hir::OwnerId> {
+ let did = fi.owner_id.to_def_id();
+ let instance = Instance::new(did, ty::List::identity_for_item(tcx, did));
+ let name = Symbol::intern(tcx.symbol_name(instance).name);
+ if let Some(&existing_id) = self.seen_decls.get(&name) {
+ // Avoid updating the map with the new entry when we do find a collision. We want to
+ // make sure we're always pointing to the first definition as the previous declaration.
+ // This lets us avoid emitting "knock-on" diagnostics.
+ Some(existing_id)
+ } else {
+ self.seen_decls.insert(name, fi.owner_id)
+ }
+ }
+
+ #[instrument(level = "trace", skip(self, tcx))]
+ fn check_foreign_item<'tcx>(&mut self, tcx: TyCtxt<'tcx>, this_fi: hir::ForeignItemId) {
+ let DefKind::Fn = tcx.def_kind(this_fi.owner_id) else { return };
+ let Some(existing_did) = self.insert(tcx, this_fi) else { return };
+
+ let existing_decl_ty = tcx.type_of(existing_did).skip_binder();
+ let this_decl_ty = tcx.type_of(this_fi.owner_id).instantiate_identity();
+ debug!(
+ "ClashingExternDeclarations: Comparing existing {:?}: {:?} to this {:?}: {:?}",
+ existing_did, existing_decl_ty, this_fi.owner_id, this_decl_ty
+ );
+
+ // Check that the declarations match.
+ if !structurally_same_type(
+ tcx,
+ tcx.param_env(this_fi.owner_id),
+ existing_decl_ty,
+ this_decl_ty,
+ types::CItemKind::Declaration,
+ ) {
+ let orig = name_of_extern_decl(tcx, existing_did);
+
+ // Finally, emit the diagnostic.
+ let this = tcx.item_name(this_fi.owner_id.to_def_id());
+ let orig = orig.get_name();
+ let previous_decl_label = get_relevant_span(tcx, existing_did);
+ let mismatch_label = get_relevant_span(tcx, this_fi.owner_id);
+ let sub =
+ BuiltinClashingExternSub { tcx, expected: existing_decl_ty, found: this_decl_ty };
+ let decorator = if orig == this {
+ BuiltinClashingExtern::SameName {
+ this,
+ orig,
+ previous_decl_label,
+ mismatch_label,
+ sub,
+ }
+ } else {
+ BuiltinClashingExtern::DiffName {
+ this,
+ orig,
+ previous_decl_label,
+ mismatch_label,
+ sub,
+ }
+ };
+ tcx.emit_spanned_lint(
+ CLASHING_EXTERN_DECLARATIONS,
+ this_fi.hir_id(),
+ mismatch_label,
+ decorator,
+ );
+ }
+ }
+}
+
+/// Get the name of the symbol that's linked against for a given extern declaration. That is,
+/// the name specified in a #[link_name = ...] attribute if one was specified, or else just the
+/// symbol's name.
+fn name_of_extern_decl(tcx: TyCtxt<'_>, fi: hir::OwnerId) -> SymbolName {
+ if let Some((overridden_link_name, overridden_link_name_span)) =
+ tcx.codegen_fn_attrs(fi).link_name.map(|overridden_link_name| {
+ // FIXME: Instead of searching through the attributes again to get span
+ // information, we could have codegen_fn_attrs also give span information back for
+ // where the attribute was defined. However, until this is found to be a
+ // bottleneck, this does just fine.
+ (overridden_link_name, tcx.get_attr(fi, sym::link_name).unwrap().span)
+ })
+ {
+ SymbolName::Link(overridden_link_name, overridden_link_name_span)
+ } else {
+ SymbolName::Normal(tcx.item_name(fi.to_def_id()))
+ }
+}
+
+/// We want to ensure that we use spans for both decls that include where the
+/// name was defined, whether that was from the link_name attribute or not.
+fn get_relevant_span(tcx: TyCtxt<'_>, fi: hir::OwnerId) -> Span {
+ match name_of_extern_decl(tcx, fi) {
+ SymbolName::Normal(_) => tcx.def_span(fi),
+ SymbolName::Link(_, annot_span) => annot_span,
+ }
+}
+
+/// Checks whether two types are structurally similar enough that the declarations shouldn't
+/// clash. We need this so we don't emit a lint when two modules both declare an extern struct
+/// with the same members (those declarations shouldn't clash).
+fn structurally_same_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ ckind: types::CItemKind,
+) -> bool {
+ let mut seen_types = FxHashSet::default();
+ structurally_same_type_impl(&mut seen_types, tcx, param_env, a, b, ckind)
+}
+
+fn structurally_same_type_impl<'tcx>(
+ seen_types: &mut FxHashSet<(Ty<'tcx>, Ty<'tcx>)>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ ckind: types::CItemKind,
+) -> bool {
+ debug!("structurally_same_type_impl(tcx, a = {:?}, b = {:?})", a, b);
+
+ // Given a transparent newtype, reach through and grab the inner
+ // type unless the newtype makes the type non-null.
+ let non_transparent_ty = |mut ty: Ty<'tcx>| -> Ty<'tcx> {
+ loop {
+ if let ty::Adt(def, args) = *ty.kind() {
+ let is_transparent = def.repr().transparent();
+ let is_non_null = types::nonnull_optimization_guaranteed(tcx, def);
+ debug!(
+ "non_transparent_ty({:?}) -- type is transparent? {}, type is non-null? {}",
+ ty, is_transparent, is_non_null
+ );
+ if is_transparent && !is_non_null {
+ debug_assert_eq!(def.variants().len(), 1);
+ let v = &def.variant(FIRST_VARIANT);
+ // continue with `ty`'s non-ZST field,
+ // otherwise `ty` is a ZST and we can return
+ if let Some(field) = types::transparent_newtype_field(tcx, v) {
+ ty = field.ty(tcx, args);
+ continue;
+ }
+ }
+ }
+ debug!("non_transparent_ty -> {:?}", ty);
+ return ty;
+ }
+ };
+
+ let a = non_transparent_ty(a);
+ let b = non_transparent_ty(b);
+
+ if !seen_types.insert((a, b)) {
+ // We've encountered a cycle. There's no point going any further -- the types are
+ // structurally the same.
+ true
+ } else if a == b {
+ // All nominally-same types are structurally same, too.
+ true
+ } else {
+ // Do a full, depth-first comparison between the two.
+ use rustc_type_ir::sty::TyKind::*;
+ let a_kind = a.kind();
+ let b_kind = b.kind();
+
+ let compare_layouts = |a, b| -> Result<bool, &'tcx LayoutError<'tcx>> {
+ debug!("compare_layouts({:?}, {:?})", a, b);
+ let a_layout = &tcx.layout_of(param_env.and(a))?.layout.abi();
+ let b_layout = &tcx.layout_of(param_env.and(b))?.layout.abi();
+ debug!(
+ "comparing layouts: {:?} == {:?} = {}",
+ a_layout,
+ b_layout,
+ a_layout == b_layout
+ );
+ Ok(a_layout == b_layout)
+ };
+
+ #[allow(rustc::usage_of_ty_tykind)]
+ let is_primitive_or_pointer =
+ |kind: &ty::TyKind<'_>| kind.is_primitive() || matches!(kind, RawPtr(..) | Ref(..));
+
+ ensure_sufficient_stack(|| {
+ match (a_kind, b_kind) {
+ (Adt(a_def, _), Adt(b_def, _)) => {
+ // We can immediately rule out these types as structurally same if
+ // their layouts differ.
+ match compare_layouts(a, b) {
+ Ok(false) => return false,
+ _ => (), // otherwise, continue onto the full, fields comparison
+ }
+
+ // Grab a flattened representation of all fields.
+ let a_fields = a_def.variants().iter().flat_map(|v| v.fields.iter());
+ let b_fields = b_def.variants().iter().flat_map(|v| v.fields.iter());
+
+ // Perform a structural comparison for each field.
+ a_fields.eq_by(
+ b_fields,
+ |&ty::FieldDef { did: a_did, .. }, &ty::FieldDef { did: b_did, .. }| {
+ structurally_same_type_impl(
+ seen_types,
+ tcx,
+ param_env,
+ tcx.type_of(a_did).instantiate_identity(),
+ tcx.type_of(b_did).instantiate_identity(),
+ ckind,
+ )
+ },
+ )
+ }
+ (Array(a_ty, a_const), Array(b_ty, b_const)) => {
+ // For arrays, we also check that the length constants match.
+ a_const.kind() == b_const.kind()
+ && structurally_same_type_impl(
+ seen_types, tcx, param_env, *a_ty, *b_ty, ckind,
+ )
+ }
+ (Slice(a_ty), Slice(b_ty)) => {
+ structurally_same_type_impl(seen_types, tcx, param_env, *a_ty, *b_ty, ckind)
+ }
+ (RawPtr(a_tymut), RawPtr(b_tymut)) => {
+ a_tymut.mutbl == b_tymut.mutbl
+ && structurally_same_type_impl(
+ seen_types, tcx, param_env, a_tymut.ty, b_tymut.ty, ckind,
+ )
+ }
+ (Ref(_a_region, a_ty, a_mut), Ref(_b_region, b_ty, b_mut)) => {
+ // For structural sameness, we don't need the region to be same.
+ a_mut == b_mut
+ && structurally_same_type_impl(
+ seen_types, tcx, param_env, *a_ty, *b_ty, ckind,
+ )
+ }
+ (FnDef(..), FnDef(..)) => {
+ let a_poly_sig = a.fn_sig(tcx);
+ let b_poly_sig = b.fn_sig(tcx);
+
+ // We don't compare regions, but leaving bound regions around causes ICEs, so
+ // we erase them.
+ let a_sig = tcx.erase_late_bound_regions(a_poly_sig);
+ let b_sig = tcx.erase_late_bound_regions(b_poly_sig);
+
+ (a_sig.abi, a_sig.unsafety, a_sig.c_variadic)
+ == (b_sig.abi, b_sig.unsafety, b_sig.c_variadic)
+ && a_sig.inputs().iter().eq_by(b_sig.inputs().iter(), |a, b| {
+ structurally_same_type_impl(seen_types, tcx, param_env, *a, *b, ckind)
+ })
+ && structurally_same_type_impl(
+ seen_types,
+ tcx,
+ param_env,
+ a_sig.output(),
+ b_sig.output(),
+ ckind,
+ )
+ }
+ (Tuple(a_args), Tuple(b_args)) => {
+ a_args.iter().eq_by(b_args.iter(), |a_ty, b_ty| {
+ structurally_same_type_impl(seen_types, tcx, param_env, a_ty, b_ty, ckind)
+ })
+ }
+ // For these, structural sameness is not as easy to define.
+ // For the purposes of this lint, take the conservative approach and mark them as
+ // not structurally same.
+ (Dynamic(..), Dynamic(..))
+ | (Error(..), Error(..))
+ | (Closure(..), Closure(..))
+ | (Generator(..), Generator(..))
+ | (GeneratorWitness(..), GeneratorWitness(..))
+ | (Alias(ty::Projection, ..), Alias(ty::Projection, ..))
+ | (Alias(ty::Inherent, ..), Alias(ty::Inherent, ..))
+ | (Alias(ty::Opaque, ..), Alias(ty::Opaque, ..)) => false,
+
+ // These definitely should have been caught above.
+ (Bool, Bool) | (Char, Char) | (Never, Never) | (Str, Str) => unreachable!(),
+
+ // An Adt and a primitive or pointer type. This can be FFI-safe if non-null
+ // enum layout optimisation is being applied.
+ (Adt(..), other_kind) | (other_kind, Adt(..))
+ if is_primitive_or_pointer(other_kind) =>
+ {
+ let (primitive, adt) =
+ if is_primitive_or_pointer(a.kind()) { (a, b) } else { (b, a) };
+ if let Some(ty) = types::repr_nullable_ptr(tcx, param_env, adt, ckind) {
+ ty == primitive
+ } else {
+ compare_layouts(a, b).unwrap_or(false)
+ }
+ }
+ // Otherwise, just compare the layouts. This may fail to lint for some
+ // incompatible types, but at the very least, will stop reads into
+ // uninitialised memory.
+ _ => compare_layouts(a, b).unwrap_or(false),
+ }
+ })
+ }
+}
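
(Aside, not part of the diff: a hedged sketch of input the new `clashing_extern_declarations` query compares; the `Fd` newtype and the `close` declarations are invented. Because `structurally_same_type` reaches through `#[repr(transparent)]` newtypes that are not non-null-optimized, these two declarations should be treated as equivalent rather than reported as a clash.)

```rust
#![allow(dead_code)]

#[repr(transparent)]
struct Fd(i32);

mod a {
    extern "C" {
        // Declared as taking the transparent newtype...
        pub fn close(fd: super::Fd) -> i32;
    }
}

mod b {
    extern "C" {
        // ...and elsewhere as taking the underlying `i32`. The structural
        // check peels `Fd` down to `i32`, so no clash is reported.
        pub fn close(fd: i32) -> i32;
    }
}

fn main() {}
```
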
diff --git a/compiler/rustc_lint/src/internal.rs b/compiler/rustc_lint/src/internal.rs
index 6f773e04a..4b803621f 100644
--- a/compiler/rustc_lint/src/internal.rs
+++ b/compiler/rustc_lint/src/internal.rs
@@ -52,20 +52,20 @@ impl LateLintPass<'_> for DefaultHashTypes {
}
/// Helper function for lints that check for expressions with calls and use typeck results to
-/// get the `DefId` and `SubstsRef` of the function.
+/// get the `DefId` and `GenericArgsRef` of the function.
fn typeck_results_of_method_fn<'tcx>(
cx: &LateContext<'tcx>,
expr: &Expr<'_>,
-) -> Option<(Span, DefId, ty::subst::SubstsRef<'tcx>)> {
+) -> Option<(Span, DefId, ty::GenericArgsRef<'tcx>)> {
match expr.kind {
ExprKind::MethodCall(segment, ..)
if let Some(def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id) =>
{
- Some((segment.ident.span, def_id, cx.typeck_results().node_substs(expr.hir_id)))
+ Some((segment.ident.span, def_id, cx.typeck_results().node_args(expr.hir_id)))
},
_ => {
match cx.typeck_results().node_type(expr.hir_id).kind() {
- &ty::FnDef(def_id, substs) => Some((expr.span, def_id, substs)),
+ &ty::FnDef(def_id, args) => Some((expr.span, def_id, args)),
_ => None,
}
}
@@ -89,8 +89,8 @@ declare_lint_pass!(QueryStability => [POTENTIAL_QUERY_INSTABILITY]);
impl LateLintPass<'_> for QueryStability {
fn check_expr(&mut self, cx: &LateContext<'_>, expr: &Expr<'_>) {
- let Some((span, def_id, substs)) = typeck_results_of_method_fn(cx, expr) else { return };
- if let Ok(Some(instance)) = ty::Instance::resolve(cx.tcx, cx.param_env, def_id, substs) {
+ let Some((span, def_id, args)) = typeck_results_of_method_fn(cx, expr) else { return };
+ if let Ok(Some(instance)) = ty::Instance::resolve(cx.tcx, cx.param_env, def_id, args) {
let def_id = instance.def_id();
if cx.tcx.has_attr(def_id, sym::rustc_lint_query_instability) {
cx.emit_spanned_lint(
@@ -232,7 +232,7 @@ fn is_ty_or_ty_ctxt(cx: &LateContext<'_>, path: &Path<'_>) -> Option<String> {
}
// Only lint on `&Ty` and `&TyCtxt` if it is used outside of a trait.
Res::SelfTyAlias { alias_to: did, is_trait_impl: false, .. } => {
- if let ty::Adt(adt, substs) = cx.tcx.type_of(did).subst_identity().kind() {
+ if let ty::Adt(adt, args) = cx.tcx.type_of(did).instantiate_identity().kind() {
if let Some(name @ (sym::Ty | sym::TyCtxt)) = cx.tcx.get_diagnostic_name(adt.did())
{
// NOTE: This path is currently unreachable as `Ty<'tcx>` is
@@ -241,7 +241,7 @@ fn is_ty_or_ty_ctxt(cx: &LateContext<'_>, path: &Path<'_>) -> Option<String> {
//
// I(@lcnr) still kept this branch in so we don't miss this
// if we ever change it in the future.
- return Some(format!("{}<{}>", name, substs[0]));
+ return Some(format!("{}<{}>", name, args[0]));
}
}
}
@@ -379,9 +379,9 @@ declare_lint_pass!(Diagnostics => [ UNTRANSLATABLE_DIAGNOSTIC, DIAGNOSTIC_OUTSID
impl LateLintPass<'_> for Diagnostics {
fn check_expr(&mut self, cx: &LateContext<'_>, expr: &Expr<'_>) {
- let Some((span, def_id, substs)) = typeck_results_of_method_fn(cx, expr) else { return };
- debug!(?span, ?def_id, ?substs);
- let has_attr = ty::Instance::resolve(cx.tcx, cx.param_env, def_id, substs)
+ let Some((span, def_id, args)) = typeck_results_of_method_fn(cx, expr) else { return };
+ debug!(?span, ?def_id, ?args);
+ let has_attr = ty::Instance::resolve(cx.tcx, cx.param_env, def_id, args)
.ok()
.flatten()
.is_some_and(|inst| cx.tcx.has_attr(inst.def_id(), sym::rustc_lint_diagnostics));
@@ -414,7 +414,7 @@ impl LateLintPass<'_> for Diagnostics {
}
let mut found_diagnostic_message = false;
- for ty in substs.types() {
+ for ty in args.types() {
debug!(?ty);
if let Some(adt_def) = ty.ty_adt_def() &&
let Some(name) = cx.tcx.get_diagnostic_name(adt_def.did()) &&
diff --git a/compiler/rustc_lint/src/late.rs b/compiler/rustc_lint/src/late.rs
index fb12ded71..73af51d9e 100644
--- a/compiler/rustc_lint/src/late.rs
+++ b/compiler/rustc_lint/src/late.rs
@@ -17,9 +17,9 @@
use crate::{passes::LateLintPassObject, LateContext, LateLintPass, LintStore};
use rustc_ast as ast;
use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_data_structures::sync::{join, DynSend};
+use rustc_data_structures::sync::join;
use rustc_hir as hir;
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::{LocalDefId, LocalModDefId};
use rustc_hir::intravisit as hir_visit;
use rustc_hir::intravisit::Visitor;
use rustc_middle::hir::nested_filter;
@@ -336,9 +336,9 @@ macro_rules! impl_late_lint_pass {
crate::late_lint_methods!(impl_late_lint_pass, []);
-pub(super) fn late_lint_mod<'tcx, T: LateLintPass<'tcx> + 'tcx>(
+pub fn late_lint_mod<'tcx, T: LateLintPass<'tcx> + 'tcx>(
tcx: TyCtxt<'tcx>,
- module_def_id: LocalDefId,
+ module_def_id: LocalModDefId,
builtin_lints: T,
) {
let context = LateContext {
@@ -369,13 +369,19 @@ pub(super) fn late_lint_mod<'tcx, T: LateLintPass<'tcx> + 'tcx>(
fn late_lint_mod_inner<'tcx, T: LateLintPass<'tcx>>(
tcx: TyCtxt<'tcx>,
- module_def_id: LocalDefId,
+ module_def_id: LocalModDefId,
context: LateContext<'tcx>,
pass: T,
) {
let mut cx = LateContextAndPass { context, pass };
let (module, _span, hir_id) = tcx.hir().get_module(module_def_id);
+
+ // There is no module lint that will have the crate itself as an item, so check it here.
+ if hir_id == hir::CRATE_HIR_ID {
+ lint_callback!(cx, check_crate,);
+ }
+
cx.process_mod(module, hir_id);
// Visit the crate attributes
@@ -383,10 +389,19 @@ fn late_lint_mod_inner<'tcx, T: LateLintPass<'tcx>>(
for attr in tcx.hir().attrs(hir::CRATE_HIR_ID).iter() {
cx.visit_attribute(attr)
}
+ lint_callback!(cx, check_crate_post,);
}
}
-fn late_lint_crate<'tcx, T: LateLintPass<'tcx> + 'tcx>(tcx: TyCtxt<'tcx>, builtin_lints: T) {
+fn late_lint_crate<'tcx>(tcx: TyCtxt<'tcx>) {
+ // Note: `passes` is often empty.
+ let mut passes: Vec<_> =
+ unerased_lint_store(tcx).late_passes.iter().map(|mk_pass| (mk_pass)(tcx)).collect();
+
+ if passes.is_empty() {
+ return;
+ }
+
let context = LateContext {
tcx,
enclosing_body: None,
@@ -399,18 +414,8 @@ fn late_lint_crate<'tcx, T: LateLintPass<'tcx> + 'tcx>(tcx: TyCtxt<'tcx>, builti
only_module: false,
};
- // Note: `passes` is often empty. In that case, it's faster to run
- // `builtin_lints` directly rather than bundling it up into the
- // `RuntimeCombinedLateLintPass`.
- let mut passes: Vec<_> =
- unerased_lint_store(tcx).late_passes.iter().map(|mk_pass| (mk_pass)(tcx)).collect();
- if passes.is_empty() {
- late_lint_crate_inner(tcx, context, builtin_lints);
- } else {
- passes.push(Box::new(builtin_lints));
- let pass = RuntimeCombinedLateLintPass { passes: &mut passes[..] };
- late_lint_crate_inner(tcx, context, pass);
- }
+ let pass = RuntimeCombinedLateLintPass { passes: &mut passes[..] };
+ late_lint_crate_inner(tcx, context, pass);
}
fn late_lint_crate_inner<'tcx, T: LateLintPass<'tcx>>(
@@ -432,15 +437,12 @@ fn late_lint_crate_inner<'tcx, T: LateLintPass<'tcx>>(
}
/// Performs lint checking on a crate.
-pub fn check_crate<'tcx, T: LateLintPass<'tcx> + 'tcx>(
- tcx: TyCtxt<'tcx>,
- builtin_lints: impl FnOnce() -> T + Send + DynSend,
-) {
+pub fn check_crate<'tcx>(tcx: TyCtxt<'tcx>) {
join(
|| {
tcx.sess.time("crate_lints", || {
// Run whole crate non-incremental lints
- late_lint_crate(tcx, builtin_lints());
+ late_lint_crate(tcx);
});
},
|| {
diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs
index 8376835f5..1f4e5fa4d 100644
--- a/compiler/rustc_lint/src/levels.rs
+++ b/compiler/rustc_lint/src/levels.rs
@@ -1,4 +1,5 @@
use crate::{
+ builtin::MISSING_DOCS,
context::{CheckLintNameResult, LintStore},
fluent_generated as fluent,
late::unerased_lint_store,
@@ -11,6 +12,7 @@ use rustc_ast as ast;
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{DecorateLint, DiagnosticBuilder, DiagnosticMessage, MultiSpan};
+use rustc_feature::Features;
use rustc_hir as hir;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::HirId;
@@ -118,6 +120,7 @@ fn lint_expectations(tcx: TyCtxt<'_>, (): ()) -> Vec<(LintExpectationId, LintExp
let mut builder = LintLevelsBuilder {
sess: tcx.sess,
+ features: tcx.features(),
provider: QueryMapExpectationsWrapper {
tcx,
cur: hir::CRATE_HIR_ID,
@@ -147,6 +150,7 @@ fn shallow_lint_levels_on(tcx: TyCtxt<'_>, owner: hir::OwnerId) -> ShallowLintLe
let mut levels = LintLevelsBuilder {
sess: tcx.sess,
+ features: tcx.features(),
provider: LintLevelQueryMap {
tcx,
cur: owner.into(),
@@ -265,7 +269,10 @@ impl LintLevelsProvider for QueryMapExpectationsWrapper<'_> {
self.specs.lint_level_id_at_node(self.tcx, LintId::of(lint), self.cur)
}
fn push_expectation(&mut self, id: LintExpectationId, expectation: LintExpectation) {
- let LintExpectationId::Stable { attr_id: Some(attr_id), hir_id, attr_index, .. } = id else { bug!("unstable expectation id should already be mapped") };
+ let LintExpectationId::Stable { attr_id: Some(attr_id), hir_id, attr_index, .. } = id
+ else {
+ bug!("unstable expectation id should already be mapped")
+ };
let key = LintExpectationId::Unstable { attr_id, lint_index: None };
self.unstable_to_stable_ids.entry(key).or_insert(LintExpectationId::Stable {
@@ -431,6 +438,7 @@ impl<'tcx> Visitor<'tcx> for LintLevelsBuilder<'_, QueryMapExpectationsWrapper<'
pub struct LintLevelsBuilder<'s, P> {
sess: &'s Session,
+ features: &'s Features,
provider: P,
warn_about_weird_lints: bool,
store: &'s LintStore,
@@ -444,12 +452,14 @@ pub(crate) struct BuilderPush {
impl<'s> LintLevelsBuilder<'s, TopDown> {
pub(crate) fn new(
sess: &'s Session,
+ features: &'s Features,
warn_about_weird_lints: bool,
store: &'s LintStore,
registered_tools: &'s RegisteredTools,
) -> Self {
let mut builder = LintLevelsBuilder {
sess,
+ features,
provider: TopDown { sets: LintLevelSets::new(), cur: COMMAND_LINE },
warn_about_weird_lints,
store,
@@ -522,6 +532,10 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
self.sess
}
+ pub(crate) fn features(&self) -> &Features {
+ self.features
+ }
+
pub(crate) fn lint_store(&self) -> &LintStore {
self.store
}
@@ -542,7 +556,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
let Ok(ids) = self.store.find_lints(&lint_name) else {
// errors handled in check_lint_name_cmdline above
- continue
+ continue;
};
for id in ids {
// ForceWarn and Forbid cannot be overridden
@@ -664,6 +678,16 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
continue;
}
+ // `#[doc(hidden)]` disables missing_docs check.
+ if attr.has_name(sym::doc)
+ && attr
+ .meta_item_list()
+ .map_or(false, |l| ast::attr::list_contains_name(&l, sym::hidden))
+ {
+ self.insert(LintId::of(MISSING_DOCS), (Level::Allow, LintLevelSource::Default));
+ continue;
+ }
+
let level = match Level::from_attr(attr) {
None => continue,
// This is the only lint level with a `LintExpectationId` that can be created from an attribute
@@ -685,9 +709,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
Some(lvl) => lvl,
};
- let Some(mut metas) = attr.meta_item_list() else {
- continue
- };
+ let Some(mut metas) = attr.meta_item_list() else { continue };
if metas.is_empty() {
// This emits the unused_attributes lint for `#[level()]`
@@ -704,7 +726,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
ast::MetaItemKind::NameValue(ref name_value) => {
if item.path == sym::reason {
if let ast::LitKind::Str(rationale, _) = name_value.kind {
- if !self.sess.features_untracked().lint_reasons {
+ if !self.features.lint_reasons {
feature_err(
&self.sess.parse_sess,
sym::lint_reasons,
@@ -944,7 +966,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
);
}
} else {
- panic!("renamed lint does not exist: {}", new_name);
+ panic!("renamed lint does not exist: {new_name}");
}
}
}
@@ -956,8 +978,9 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
continue;
}
- let LintLevelSource::Node { name: lint_attr_name, span: lint_attr_span, .. } = *src else {
- continue
+ let LintLevelSource::Node { name: lint_attr_name, span: lint_attr_span, .. } = *src
+ else {
+ continue;
};
self.emit_spanned_lint(
@@ -976,9 +999,10 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
/// Returns `true` if the lint's feature is enabled.
// FIXME only emit this once for each attribute, instead of repeating it 4 times for
// pre-expansion lints, post-expansion lints, `shallow_lint_levels_on` and `lint_expectations`.
+ #[track_caller]
fn check_gated_lint(&self, lint_id: LintId, span: Span) -> bool {
if let Some(feature) = lint_id.lint.feature_gate {
- if !self.sess.features_untracked().enabled(feature) {
+ if !self.features.enabled(feature) {
let lint = builtin::UNKNOWN_LINTS;
let (level, src) = self.lint_level(builtin::UNKNOWN_LINTS);
struct_lint_level(
@@ -1013,6 +1037,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
///
/// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
#[rustc_lint_diagnostics]
+ #[track_caller]
pub(crate) fn struct_lint(
&self,
lint: &'static Lint,
@@ -1026,6 +1051,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
struct_lint_level(self.sess, lint, level, src, span, msg, decorate)
}
+ #[track_caller]
pub fn emit_spanned_lint(
&self,
lint: &'static Lint,
@@ -1038,6 +1064,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
});
}
+ #[track_caller]
pub fn emit_lint(&self, lint: &'static Lint, decorate: impl for<'a> DecorateLint<'a, ()>) {
let (level, src) = self.lint_level(lint);
struct_lint_level(self.sess, lint, level, src, None, decorate.msg(), |lint| {
diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs
index 602071a55..585b10e79 100644
--- a/compiler/rustc_lint/src/lib.rs
+++ b/compiler/rustc_lint/src/lib.rs
@@ -40,6 +40,7 @@
#![recursion_limit = "256"]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate rustc_middle;
@@ -58,6 +59,7 @@ mod enum_intrinsics_non_enums;
mod errors;
mod expect;
mod for_loops_over_fallibles;
+mod foreign_modules;
pub mod hidden_unicode_codepoints;
mod internal;
mod invalid_from_utf8;
@@ -75,6 +77,7 @@ mod noop_method_call;
mod opaque_hidden_inferred_bound;
mod pass_by_value;
mod passes;
+mod ptr_nulls;
mod redundant_semicolon;
mod reference_casting;
mod traits;
@@ -87,7 +90,7 @@ use rustc_ast as ast;
use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
use rustc_hir as hir;
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::{LocalDefId, LocalModDefId};
use rustc_middle::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::builtin::{
@@ -115,6 +118,7 @@ use nonstandard_style::*;
use noop_method_call::*;
use opaque_hidden_inferred_bound::*;
use pass_by_value::*;
+use ptr_nulls::*;
use redundant_semicolon::*;
use reference_casting::*;
use traits::*;
@@ -122,11 +126,11 @@ use types::*;
use unused::*;
/// Useful for other parts of the compiler / Clippy.
-pub use builtin::SoftLints;
+pub use builtin::{MissingDoc, SoftLints};
pub use context::{CheckLintNameResult, FindLintError, LintStore};
pub use context::{EarlyContext, LateContext, LintContext};
pub use early::{check_ast_node, EarlyCheckNode};
-pub use late::{check_crate, unerased_lint_store};
+pub use late::{check_crate, late_lint_mod, unerased_lint_store};
pub use passes::{EarlyLintPass, LateLintPass};
pub use rustc_session::lint::Level::{self, *};
pub use rustc_session::lint::{BufferedEarlyLint, FutureIncompatibleInfo, Lint, LintId};
@@ -137,11 +141,12 @@ fluent_messages! { "../messages.ftl" }
pub fn provide(providers: &mut Providers) {
levels::provide(providers);
expect::provide(providers);
+ foreign_modules::provide(providers);
*providers = Providers { lint_mod, ..*providers };
}
-fn lint_mod(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
- late::late_lint_mod(tcx, module_def_id, BuiltinCombinedModuleLateLintPass::new());
+fn lint_mod(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
+ late_lint_mod(tcx, module_def_id, BuiltinCombinedModuleLateLintPass::new());
}
early_lint_methods!(
@@ -171,7 +176,7 @@ early_lint_methods!(
WhileTrue: WhileTrue,
NonAsciiIdents: NonAsciiIdents,
HiddenUnicodeCodepoints: HiddenUnicodeCodepoints,
- IncompleteFeatures: IncompleteFeatures,
+ IncompleteInternalFeatures: IncompleteInternalFeatures,
RedundantSemicolons: RedundantSemicolons,
UnusedDocComment: UnusedDocComment,
UnexpectedCfgs: UnexpectedCfgs,
@@ -179,27 +184,6 @@ early_lint_methods!(
]
);
-// FIXME: Make a separate lint type which does not require typeck tables.
-
-late_lint_methods!(
- declare_combined_late_lint_pass,
- [
- pub BuiltinCombinedLateLintPass,
- [
- // Tracks state across modules
- UnnameableTestItems: UnnameableTestItems::new(),
- // Tracks attributes of parents
- MissingDoc: MissingDoc::new(),
- // Builds a global list of all impls of `Debug`.
- // FIXME: Turn the computation of types which implement Debug into a query
- // and change this to a module lint pass
- MissingDebugImplementations: MissingDebugImplementations::default(),
- // Keeps a global list of foreign declarations.
- ClashingExternDeclarations: ClashingExternDeclarations::new(),
- ]
- ]
-);
-
late_lint_methods!(
declare_combined_late_lint_pass,
[
@@ -216,7 +200,7 @@ late_lint_methods!(
BoxPointers: BoxPointers,
PathStatements: PathStatements,
LetUnderscore: LetUnderscore,
- InvalidReferenceCasting: InvalidReferenceCasting,
+ InvalidReferenceCasting: InvalidReferenceCasting::default(),
// Depends on referenced function signatures in expressions
UnusedResults: UnusedResults,
NonUpperCaseGlobals: NonUpperCaseGlobals,
@@ -225,6 +209,7 @@ late_lint_methods!(
// Depends on types used in type definitions
MissingCopyImplementations: MissingCopyImplementations,
// Depends on referenced function signatures in expressions
+ PtrNullChecks: PtrNullChecks,
MutableTransmutes: MutableTransmutes,
TypeAliasBounds: TypeAliasBounds,
TrivialConstraints: TrivialConstraints,
@@ -251,6 +236,8 @@ late_lint_methods!(
OpaqueHiddenInferredBound: OpaqueHiddenInferredBound,
MultipleSupertraitUpcastable: MultipleSupertraitUpcastable,
MapUnitFn: MapUnitFn,
+ MissingDebugImplementations: MissingDebugImplementations,
+ MissingDoc: MissingDoc,
]
]
);
@@ -279,7 +266,7 @@ fn register_builtins(store: &mut LintStore) {
store.register_lints(&BuiltinCombinedPreExpansionLintPass::get_lints());
store.register_lints(&BuiltinCombinedEarlyLintPass::get_lints());
store.register_lints(&BuiltinCombinedModuleLateLintPass::get_lints());
- store.register_lints(&BuiltinCombinedLateLintPass::get_lints());
+ store.register_lints(&foreign_modules::get_lints());
add_lint_group!(
"nonstandard_style",
@@ -519,20 +506,20 @@ fn register_internals(store: &mut LintStore) {
store.register_lints(&LintPassImpl::get_lints());
store.register_early_pass(|| Box::new(LintPassImpl));
store.register_lints(&DefaultHashTypes::get_lints());
- store.register_late_pass(|_| Box::new(DefaultHashTypes));
+ store.register_late_mod_pass(|_| Box::new(DefaultHashTypes));
store.register_lints(&QueryStability::get_lints());
- store.register_late_pass(|_| Box::new(QueryStability));
+ store.register_late_mod_pass(|_| Box::new(QueryStability));
store.register_lints(&ExistingDocKeyword::get_lints());
- store.register_late_pass(|_| Box::new(ExistingDocKeyword));
+ store.register_late_mod_pass(|_| Box::new(ExistingDocKeyword));
store.register_lints(&TyTyKind::get_lints());
- store.register_late_pass(|_| Box::new(TyTyKind));
+ store.register_late_mod_pass(|_| Box::new(TyTyKind));
store.register_lints(&Diagnostics::get_lints());
store.register_early_pass(|| Box::new(Diagnostics));
- store.register_late_pass(|_| Box::new(Diagnostics));
+ store.register_late_mod_pass(|_| Box::new(Diagnostics));
store.register_lints(&BadOptAccess::get_lints());
- store.register_late_pass(|_| Box::new(BadOptAccess));
+ store.register_late_mod_pass(|_| Box::new(BadOptAccess));
store.register_lints(&PassByValue::get_lints());
- store.register_late_pass(|_| Box::new(PassByValue));
+ store.register_late_mod_pass(|_| Box::new(PassByValue));
// FIXME(davidtwco): deliberately do not include `UNTRANSLATABLE_DIAGNOSTIC` and
// `DIAGNOSTIC_OUTSIDE_OF_IMPL` here because `-Wrustc::internal` is provided to every crate and
// these lints will trigger all of the time - change this once migration to diagnostic structs
diff --git a/compiler/rustc_lint/src/lints.rs b/compiler/rustc_lint/src/lints.rs
index 9260237fb..25982a458 100644
--- a/compiler/rustc_lint/src/lints.rs
+++ b/compiler/rustc_lint/src/lints.rs
@@ -250,7 +250,7 @@ impl<'a> DecorateLint<'a, ()> for BuiltinUngatedAsyncFnTrackCaller<'_> {
rustc_session::parse::add_feature_diagnostics(
diag,
&self.parse_sess,
- sym::closure_track_caller,
+ sym::async_fn_track_caller,
);
diag
}
@@ -371,10 +371,6 @@ pub enum BuiltinEllipsisInclusiveRangePatternsLint {
}
#[derive(LintDiagnostic)]
-#[diag(lint_builtin_unnameable_test_items)]
-pub struct BuiltinUnnameableTestItems;
-
-#[derive(LintDiagnostic)]
#[diag(lint_builtin_keyword_idents)]
pub struct BuiltinKeywordIdents {
pub kw: Ident,
@@ -405,18 +401,27 @@ pub struct BuiltinExplicitOutlivesSuggestion {
pub struct BuiltinIncompleteFeatures {
pub name: Symbol,
#[subdiagnostic]
- pub note: Option<BuiltinIncompleteFeaturesNote>,
+ pub note: Option<BuiltinFeatureIssueNote>,
#[subdiagnostic]
pub help: Option<BuiltinIncompleteFeaturesHelp>,
}
+#[derive(LintDiagnostic)]
+#[diag(lint_builtin_internal_features)]
+#[note]
+pub struct BuiltinInternalFeatures {
+ pub name: Symbol,
+ #[subdiagnostic]
+ pub note: Option<BuiltinFeatureIssueNote>,
+}
+
#[derive(Subdiagnostic)]
#[help(lint_help)]
pub struct BuiltinIncompleteFeaturesHelp;
#[derive(Subdiagnostic)]
#[note(lint_note)]
-pub struct BuiltinIncompleteFeaturesNote {
+pub struct BuiltinFeatureIssueNote {
pub n: NonZeroU32,
}
@@ -613,6 +618,24 @@ pub struct ExpectationNote {
pub rationale: Symbol,
}
+// ptr_nulls.rs
+#[derive(LintDiagnostic)]
+pub enum PtrNullChecksDiag<'a> {
+ #[diag(lint_ptr_null_checks_fn_ptr)]
+ #[help(lint_help)]
+ FnPtr {
+ orig_ty: Ty<'a>,
+ #[label]
+ label: Span,
+ },
+ #[diag(lint_ptr_null_checks_ref)]
+ Ref {
+ orig_ty: Ty<'a>,
+ #[label]
+ label: Span,
+ },
+}
+
// for_loops_over_fallibles.rs
#[derive(LintDiagnostic)]
#[diag(lint_for_loops_over_fallibles)]
@@ -739,8 +762,18 @@ pub enum InvalidFromUtf8Diag {
// reference_casting.rs
#[derive(LintDiagnostic)]
-#[diag(lint_invalid_reference_casting)]
-pub struct InvalidReferenceCastingDiag;
+pub enum InvalidReferenceCastingDiag {
+ #[diag(lint_invalid_reference_casting_borrow_as_mut)]
+ BorrowAsMut {
+ #[label]
+ orig_cast: Option<Span>,
+ },
+ #[diag(lint_invalid_reference_casting_assign_to_ref)]
+ AssignToRef {
+ #[label]
+ orig_cast: Option<Span>,
+ },
+}
// hidden_unicode_codepoints.rs
#[derive(LintDiagnostic)]
@@ -770,7 +803,7 @@ impl AddToDiagnostic for HiddenUnicodeCodepointsDiagLabels {
) -> rustc_errors::SubdiagnosticMessage,
{
for (c, span) in self.spans {
- diag.span_label(span, format!("{:?}", c));
+ diag.span_label(span, format!("{c:?}"));
}
}
}
@@ -802,7 +835,7 @@ impl AddToDiagnostic for HiddenUnicodeCodepointsDiagSub {
spans
.into_iter()
.map(|(c, span)| {
- let c = format!("{:?}", c);
+ let c = format!("{c:?}");
(span, c[1..c.len() - 1].to_string())
})
.collect(),
@@ -817,7 +850,7 @@ impl AddToDiagnostic for HiddenUnicodeCodepointsDiagSub {
"escaped",
spans
.into_iter()
- .map(|(c, _)| format!("{:?}", c))
+ .map(|(c, _)| format!("{c:?}"))
.collect::<Vec<String>>()
.join(", "),
);
@@ -1050,8 +1083,10 @@ pub struct IdentifierUncommonCodepoints;
pub struct ConfusableIdentifierPair {
pub existing_sym: Symbol,
pub sym: Symbol,
- #[label]
+ #[label(lint_other_use)]
pub label: Span,
+ #[label(lint_current_use)]
+ pub main_label: Span,
}
#[derive(LintDiagnostic)]
@@ -1225,8 +1260,9 @@ pub enum NonUpperCaseGlobalSub {
#[note]
pub struct NoopMethodCallDiag<'a> {
pub method: Symbol,
- pub receiver_ty: Ty<'a>,
- #[label]
+ pub orig_ty: Ty<'a>,
+ pub trait_: Symbol,
+ #[suggestion(code = "", applicability = "machine-applicable")]
pub label: Span,
}
@@ -1460,7 +1496,7 @@ pub enum InvalidNanComparisons {
#[diag(lint_invalid_nan_comparisons_eq_ne)]
EqNe {
#[subdiagnostic]
- suggestion: InvalidNanComparisonsSuggestion,
+ suggestion: Option<InvalidNanComparisonsSuggestion>,
},
#[diag(lint_invalid_nan_comparisons_lt_le_gt_ge)]
LtLeGtGe,
diff --git a/compiler/rustc_lint/src/methods.rs b/compiler/rustc_lint/src/methods.rs
index 4c25d94a1..5b63b19c5 100644
--- a/compiler/rustc_lint/src/methods.rs
+++ b/compiler/rustc_lint/src/methods.rs
@@ -53,9 +53,9 @@ fn lint_cstring_as_ptr(
unwrap: &rustc_hir::Expr<'_>,
) {
let source_type = cx.typeck_results().expr_ty(source);
- if let ty::Adt(def, substs) = source_type.kind() {
+ if let ty::Adt(def, args) = source_type.kind() {
if cx.tcx.is_diagnostic_item(sym::Result, def.did()) {
- if let ty::Adt(adt, _) = substs.type_at(0).kind() {
+ if let ty::Adt(adt, _) = args.type_at(0).kind() {
if cx.tcx.is_diagnostic_item(sym::cstring_type, adt.did()) {
cx.emit_spanned_lint(
TEMPORARY_CSTRING_AS_PTR,
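
(Aside, not part of the diff: a minimal example of the `CString::new(..).unwrap().as_ptr()` chain this helper recognizes; the string literal is arbitrary. The `source_type` checked above is the `Result<CString, _>` returned by `CString::new`, and `args.type_at(0)` is the `CString` inside it.)

```rust
use std::ffi::CString;

fn main() {
    // The `CString` produced by `unwrap()` is a temporary that is dropped at
    // the end of this statement, so `ptr` dangles immediately.
    let ptr = CString::new("hello").unwrap().as_ptr();
    let _ = ptr;
}
```
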
diff --git a/compiler/rustc_lint/src/multiple_supertrait_upcastable.rs b/compiler/rustc_lint/src/multiple_supertrait_upcastable.rs
index 53fe0ceb2..84558ee1f 100644
--- a/compiler/rustc_lint/src/multiple_supertrait_upcastable.rs
+++ b/compiler/rustc_lint/src/multiple_supertrait_upcastable.rs
@@ -10,6 +10,7 @@ declare_lint! {
/// ### Example
///
/// ```rust
+ /// #![feature(multiple_supertrait_upcastable)]
/// trait A {}
/// trait B {}
///
diff --git a/compiler/rustc_lint/src/non_ascii_idents.rs b/compiler/rustc_lint/src/non_ascii_idents.rs
index 4af879b4e..62bb8c2c6 100644
--- a/compiler/rustc_lint/src/non_ascii_idents.rs
+++ b/compiler/rustc_lint/src/non_ascii_idents.rs
@@ -222,6 +222,7 @@ impl EarlyLintPass for NonAsciiIdents {
existing_sym: *existing_symbol,
sym: symbol,
label: *existing_span,
+ main_label: sp,
},
);
}
diff --git a/compiler/rustc_lint/src/noop_method_call.rs b/compiler/rustc_lint/src/noop_method_call.rs
index d56c35bb6..bc0b9d6d8 100644
--- a/compiler/rustc_lint/src/noop_method_call.rs
+++ b/compiler/rustc_lint/src/noop_method_call.rs
@@ -18,7 +18,6 @@ declare_lint! {
///
/// ```rust
/// # #![allow(unused)]
- /// #![warn(noop_method_call)]
/// struct Foo;
/// let foo = &Foo;
/// let clone: &Foo = foo.clone();
@@ -34,7 +33,7 @@ declare_lint! {
/// calling `clone` on a `&T` where `T` does not implement clone, actually doesn't do anything
/// as references are copy. This lint detects these calls and warns the user about them.
pub NOOP_METHOD_CALL,
- Allow,
+ Warn,
"detects the use of well-known noop methods"
}
@@ -79,28 +78,24 @@ impl<'tcx> LateLintPass<'tcx> for NoopMethodCall {
// We only care about method calls corresponding to the `Clone`, `Deref` and `Borrow`
// traits and ignore any other method call.
- let Some((DefKind::AssocFn, did)) =
- cx.typeck_results().type_dependent_def(expr.hir_id)
+ let Some((DefKind::AssocFn, did)) = cx.typeck_results().type_dependent_def(expr.hir_id)
else {
return;
};
let Some(trait_id) = cx.tcx.trait_of_item(did) else { return };
- if !matches!(
- cx.tcx.get_diagnostic_name(trait_id),
- Some(sym::Borrow | sym::Clone | sym::Deref)
- ) {
+ let Some(trait_) = cx.tcx.get_diagnostic_name(trait_id) else { return };
+
+ if !matches!(trait_, sym::Borrow | sym::Clone | sym::Deref) {
return;
};
- let substs = cx
+ let args = cx
.tcx
- .normalize_erasing_regions(cx.param_env, cx.typeck_results().node_substs(expr.hir_id));
+ .normalize_erasing_regions(cx.param_env, cx.typeck_results().node_args(expr.hir_id));
// Resolve the trait method instance.
- let Ok(Some(i)) = ty::Instance::resolve(cx.tcx, cx.param_env, did, substs) else {
- return
- };
+ let Ok(Some(i)) = ty::Instance::resolve(cx.tcx, cx.param_env, did, args) else { return };
// (Re)check that it implements the noop diagnostic.
let Some(name) = cx.tcx.get_diagnostic_name(i.def_id()) else { return };
@@ -117,11 +112,13 @@ impl<'tcx> LateLintPass<'tcx> for NoopMethodCall {
let expr_span = expr.span;
let span = expr_span.with_lo(receiver.span.hi());
+ let orig_ty = expr_ty.peel_refs();
+
if receiver_ty == expr_ty {
cx.emit_spanned_lint(
NOOP_METHOD_CALL,
span,
- NoopMethodCallDiag { method: call.ident.name, receiver_ty, label: span },
+ NoopMethodCallDiag { method: call.ident.name, orig_ty, trait_, label: span },
);
} else {
match name {
diff --git a/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
index 09a1651c2..79b0b32be 100644
--- a/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
+++ b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
@@ -68,13 +68,17 @@ declare_lint_pass!(OpaqueHiddenInferredBound => [OPAQUE_HIDDEN_INFERRED_BOUND]);
impl<'tcx> LateLintPass<'tcx> for OpaqueHiddenInferredBound {
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
- let hir::ItemKind::OpaqueTy(opaque) = &item.kind else { return; };
+ let hir::ItemKind::OpaqueTy(opaque) = &item.kind else {
+ return;
+ };
let def_id = item.owner_id.def_id.to_def_id();
let infcx = &cx.tcx.infer_ctxt().build();
// For every projection predicate in the opaque type's explicit bounds,
// check that the type that we're assigning actually satisfies the bounds
// of the associated type.
- for (pred, pred_span) in cx.tcx.explicit_item_bounds(def_id).subst_identity_iter_copied() {
+ for (pred, pred_span) in
+ cx.tcx.explicit_item_bounds(def_id).instantiate_identity_iter_copied()
+ {
// Liberate bound regions in the predicate since we
// don't actually care about lifetimes in this check.
let predicate = cx.tcx.liberate_late_bound_regions(def_id, pred.kind());
@@ -97,7 +101,7 @@ impl<'tcx> LateLintPass<'tcx> for OpaqueHiddenInferredBound {
}
let proj_ty =
- Ty::new_projection(cx.tcx, proj.projection_ty.def_id, proj.projection_ty.substs);
+ Ty::new_projection(cx.tcx, proj.projection_ty.def_id, proj.projection_ty.args);
// For every instance of the projection type in the bounds,
// replace them with the term we're assigning to the associated
// type in our opaque type.
@@ -113,10 +117,15 @@ impl<'tcx> LateLintPass<'tcx> for OpaqueHiddenInferredBound {
for (assoc_pred, assoc_pred_span) in cx
.tcx
.explicit_item_bounds(proj.projection_ty.def_id)
- .subst_iter_copied(cx.tcx, &proj.projection_ty.substs)
+ .iter_instantiated_copied(cx.tcx, &proj.projection_ty.args)
{
let assoc_pred = assoc_pred.fold_with(proj_replacer);
- let Ok(assoc_pred) = traits::fully_normalize(infcx, traits::ObligationCause::dummy(), cx.param_env, assoc_pred) else {
+ let Ok(assoc_pred) = traits::fully_normalize(
+ infcx,
+ traits::ObligationCause::dummy(),
+ cx.param_env,
+ assoc_pred,
+ ) else {
continue;
};
// If that predicate doesn't hold modulo regions (but passed during type-check),
@@ -147,7 +156,7 @@ impl<'tcx> LateLintPass<'tcx> for OpaqueHiddenInferredBound {
ty: Ty::new_opaque(
cx.tcx,
def_id,
- ty::InternalSubsts::identity_for_item(cx.tcx, def_id),
+ ty::GenericArgs::identity_for_item(cx.tcx, def_id),
),
proj_ty: proj_term,
assoc_pred_span,
diff --git a/compiler/rustc_lint/src/pass_by_value.rs b/compiler/rustc_lint/src/pass_by_value.rs
index 2bb2a3aab..cad2cd7fa 100644
--- a/compiler/rustc_lint/src/pass_by_value.rs
+++ b/compiler/rustc_lint/src/pass_by_value.rs
@@ -50,9 +50,9 @@ fn path_for_pass_by_value(cx: &LateContext<'_>, ty: &hir::Ty<'_>) -> Option<Stri
return Some(format!("{}{}", name, gen_args(cx, path_segment)));
}
Res::SelfTyAlias { alias_to: did, is_trait_impl: false, .. } => {
- if let ty::Adt(adt, substs) = cx.tcx.type_of(did).subst_identity().kind() {
+ if let ty::Adt(adt, args) = cx.tcx.type_of(did).instantiate_identity().kind() {
if cx.tcx.has_attr(adt.did(), sym::rustc_pass_by_value) {
- return Some(cx.tcx.def_path_str_with_substs(adt.did(), substs));
+ return Some(cx.tcx.def_path_str_with_args(adt.did(), args));
}
}
}
diff --git a/compiler/rustc_lint/src/ptr_nulls.rs b/compiler/rustc_lint/src/ptr_nulls.rs
new file mode 100644
index 000000000..02aff9103
--- /dev/null
+++ b/compiler/rustc_lint/src/ptr_nulls.rs
@@ -0,0 +1,146 @@
+use crate::{lints::PtrNullChecksDiag, LateContext, LateLintPass, LintContext};
+use rustc_ast::LitKind;
+use rustc_hir::{BinOpKind, Expr, ExprKind, TyKind};
+use rustc_session::{declare_lint, declare_lint_pass};
+use rustc_span::sym;
+
+declare_lint! {
+ /// The `useless_ptr_null_checks` lint checks for useless null checks against pointers
+ /// obtained from non-null types.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// # fn test() {}
+ /// let fn_ptr: fn() = /* somehow obtained nullable function pointer */
+ /// # test;
+ ///
+ /// if (fn_ptr as *const ()).is_null() { /* ... */ }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Function pointers and references are assumed to be non-null, so checking them for null
+ /// will always return false.
+ USELESS_PTR_NULL_CHECKS,
+ Warn,
+ "useless checking of non-null-typed pointer"
+}
+
+declare_lint_pass!(PtrNullChecks => [USELESS_PTR_NULL_CHECKS]);
+
+/// This function detects and returns the original expression from a series of consecutive casts,
+/// i.e. `(my_fn as *const _ as *mut _).cast_mut()` would return the expression for `my_fn`.
+fn ptr_cast_chain<'a>(cx: &'a LateContext<'_>, mut e: &'a Expr<'a>) -> Option<&'a Expr<'a>> {
+ let mut had_at_least_one_cast = false;
+ loop {
+ e = e.peel_blocks();
+ e = if let ExprKind::Cast(expr, t) = e.kind
+ && let TyKind::Ptr(_) = t.kind {
+ had_at_least_one_cast = true;
+ expr
+ } else if let ExprKind::MethodCall(_, expr, [], _) = e.kind
+ && let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id)
+ && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::ptr_cast | sym::ptr_cast_mut)) {
+ had_at_least_one_cast = true;
+ expr
+ } else if let ExprKind::Call(path, [arg]) = e.kind
+ && let ExprKind::Path(ref qpath) = path.kind
+ && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
+ && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::ptr_from_ref | sym::ptr_from_mut)) {
+ had_at_least_one_cast = true;
+ arg
+ } else if had_at_least_one_cast {
+ return Some(e);
+ } else {
+ return None;
+ };
+ }
+}
+
+fn incorrect_check<'a>(cx: &LateContext<'a>, expr: &Expr<'_>) -> Option<PtrNullChecksDiag<'a>> {
+ let expr = ptr_cast_chain(cx, expr)?;
+
+ let orig_ty = cx.typeck_results().expr_ty(expr);
+ if orig_ty.is_fn() {
+ Some(PtrNullChecksDiag::FnPtr { orig_ty, label: expr.span })
+ } else if orig_ty.is_ref() {
+ Some(PtrNullChecksDiag::Ref { orig_ty, label: expr.span })
+ } else {
+ None
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for PtrNullChecks {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
+ match expr.kind {
+ // Catching:
+ // <*<const/mut> <ty>>::is_null(fn_ptr as *<const/mut> <ty>)
+ ExprKind::Call(path, [arg])
+ if let ExprKind::Path(ref qpath) = path.kind
+ && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
+ && matches!(
+ cx.tcx.get_diagnostic_name(def_id),
+ Some(sym::ptr_const_is_null | sym::ptr_is_null)
+ )
+ && let Some(diag) = incorrect_check(cx, arg) =>
+ {
+ cx.emit_spanned_lint(USELESS_PTR_NULL_CHECKS, expr.span, diag)
+ }
+
+ // Catching:
+ // (fn_ptr as *<const/mut> <ty>).is_null()
+ ExprKind::MethodCall(_, receiver, _, _)
+ if let Some(def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
+ && matches!(
+ cx.tcx.get_diagnostic_name(def_id),
+ Some(sym::ptr_const_is_null | sym::ptr_is_null)
+ )
+ && let Some(diag) = incorrect_check(cx, receiver) =>
+ {
+ cx.emit_spanned_lint(USELESS_PTR_NULL_CHECKS, expr.span, diag)
+ }
+
+ ExprKind::Binary(op, left, right) if matches!(op.node, BinOpKind::Eq) => {
+ let to_check: &Expr<'_>;
+ let diag: PtrNullChecksDiag<'_>;
+ if let Some(ddiag) = incorrect_check(cx, left) {
+ to_check = right;
+ diag = ddiag;
+ } else if let Some(ddiag) = incorrect_check(cx, right) {
+ to_check = left;
+ diag = ddiag;
+ } else {
+ return;
+ }
+
+ match to_check.kind {
+ // Catching:
+ // (fn_ptr as *<const/mut> <ty>) == (0 as <ty>)
+ ExprKind::Cast(cast_expr, _)
+ if let ExprKind::Lit(spanned) = cast_expr.kind
+ && let LitKind::Int(v, _) = spanned.node && v == 0 =>
+ {
+ cx.emit_spanned_lint(USELESS_PTR_NULL_CHECKS, expr.span, diag)
+ },
+
+ // Catching:
+ // (fn_ptr as *<const/mut> <ty>) == std::ptr::null()
+ ExprKind::Call(path, [])
+ if let ExprKind::Path(ref qpath) = path.kind
+ && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
+ && let Some(diag_item) = cx.tcx.get_diagnostic_name(def_id)
+ && (diag_item == sym::ptr_null || diag_item == sym::ptr_null_mut) =>
+ {
+ cx.emit_spanned_lint(USELESS_PTR_NULL_CHECKS, expr.span, diag)
+ },
+
+ _ => {},
+ }
+ }
+ _ => {}
+ }
+ }
+}
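
(Aside, not part of the diff: an illustrative snippet, with invented names, showing one expression for each of the three arms matched in `check_expr` above; all three should be flagged by `useless_ptr_null_checks` because the operands originate from a function item or a reference.)

```rust
fn callback() {}

fn main() {
    let r: &i32 = &1;

    // `(fn_ptr as *const ()).is_null()` — the method-call arm.
    let _ = (callback as *const ()).is_null();

    // `<*const T>::is_null(ptr)` written as a path call — the call arm.
    let _ = <*const i32>::is_null(r as *const i32);

    // Comparison against `ptr::null()` — the binary-op arm.
    let _ = (callback as *const ()) == std::ptr::null();
}
```
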
diff --git a/compiler/rustc_lint/src/reference_casting.rs b/compiler/rustc_lint/src/reference_casting.rs
index db8d7302a..2577cabb3 100644
--- a/compiler/rustc_lint/src/reference_casting.rs
+++ b/compiler/rustc_lint/src/reference_casting.rs
@@ -1,7 +1,8 @@
use rustc_ast::Mutability;
-use rustc_hir::{Expr, ExprKind, MutTy, TyKind, UnOp};
-use rustc_middle::ty;
-use rustc_span::sym;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::{def::Res, Expr, ExprKind, HirId, Local, QPath, StmtKind, UnOp};
+use rustc_middle::ty::{self, TypeAndMut};
+use rustc_span::{sym, Span};
use crate::{lints::InvalidReferenceCastingDiag, LateContext, LateLintPass, LintContext};
@@ -12,7 +13,6 @@ declare_lint! {
/// ### Example
///
/// ```rust,compile_fail
- /// # #![deny(invalid_reference_casting)]
/// fn x(r: &i32) {
/// unsafe {
/// *(r as *const i32 as *mut i32) += 1;
@@ -30,44 +30,140 @@ declare_lint! {
/// `UnsafeCell` is the only way to obtain aliasable data that is considered
/// mutable.
INVALID_REFERENCE_CASTING,
- Allow,
+ Deny,
"casts of `&T` to `&mut T` without interior mutability"
}
-declare_lint_pass!(InvalidReferenceCasting => [INVALID_REFERENCE_CASTING]);
+#[derive(Default)]
+pub struct InvalidReferenceCasting {
+ casted: FxHashMap<HirId, Span>,
+}
+
+impl_lint_pass!(InvalidReferenceCasting => [INVALID_REFERENCE_CASTING]);
impl<'tcx> LateLintPass<'tcx> for InvalidReferenceCasting {
+ fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx rustc_hir::Stmt<'tcx>) {
+ let StmtKind::Local(local) = stmt.kind else {
+ return;
+ };
+ let Local { init: Some(init), els: None, .. } = local else {
+ return;
+ };
+
+ if is_cast_from_const_to_mut(cx, init) {
+ self.casted.insert(local.pat.hir_id, init.span);
+ }
+ }
+
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>) {
- let ExprKind::Unary(UnOp::Deref, e) = &expr.kind else { return; };
+ // &mut <expr>
+ let inner = if let ExprKind::AddrOf(_, Mutability::Mut, expr) = expr.kind {
+ expr
+ // <expr> = ...
+ } else if let ExprKind::Assign(expr, _, _) = expr.kind {
+ expr
+ // <expr> += ...
+ } else if let ExprKind::AssignOp(_, expr, _) = expr.kind {
+ expr
+ } else {
+ return;
+ };
+
+ let ExprKind::Unary(UnOp::Deref, e) = &inner.kind else {
+ return;
+ };
- let e = e.peel_blocks();
- let e = if let ExprKind::Cast(e, t) = e.kind
- && let TyKind::Ptr(MutTy { mutbl: Mutability::Mut, .. }) = t.kind {
+ let orig_cast = if is_cast_from_const_to_mut(cx, e) {
+ None
+ } else if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind
+ && let Res::Local(hir_id) = &path.res
+ && let Some(orig_cast) = self.casted.get(hir_id) {
+ Some(*orig_cast)
+ } else {
+ return;
+ };
+
+ cx.emit_spanned_lint(
+ INVALID_REFERENCE_CASTING,
+ expr.span,
+ if matches!(expr.kind, ExprKind::AddrOf(..)) {
+ InvalidReferenceCastingDiag::BorrowAsMut { orig_cast }
+ } else {
+ InvalidReferenceCastingDiag::AssignToRef { orig_cast }
+ },
+ );
+ }
+}
+
+fn is_cast_from_const_to_mut<'tcx>(cx: &LateContext<'tcx>, e: &'tcx Expr<'tcx>) -> bool {
+ let e = e.peel_blocks();
+
+ fn from_casts<'tcx>(cx: &LateContext<'tcx>, e: &'tcx Expr<'tcx>) -> Option<&'tcx Expr<'tcx>> {
+ // <expr> as *mut ...
+ let mut e = if let ExprKind::Cast(e, t) = e.kind
+ && let ty::RawPtr(TypeAndMut { mutbl: Mutability::Mut, .. }) = cx.typeck_results().node_type(t.hir_id).kind() {
e
+ // <expr>.cast_mut()
} else if let ExprKind::MethodCall(_, expr, [], _) = e.kind
&& let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id)
&& cx.tcx.is_diagnostic_item(sym::ptr_cast_mut, def_id) {
expr
} else {
- return;
+ return None;
};
- let e = e.peel_blocks();
- let e = if let ExprKind::Cast(e, t) = e.kind
- && let TyKind::Ptr(MutTy { mutbl: Mutability::Not, .. }) = t.kind {
- e
- } else if let ExprKind::Call(path, [arg]) = e.kind
+ let mut had_at_least_one_cast = false;
+ loop {
+ e = e.peel_blocks();
+ // <expr> as *mut/const ... or <expr> as <uint>
+ e = if let ExprKind::Cast(expr, t) = e.kind
+ && matches!(cx.typeck_results().node_type(t.hir_id).kind(), ty::RawPtr(_) | ty::Uint(_)) {
+ had_at_least_one_cast = true;
+ expr
+ // <expr>.cast(), <expr>.cast_mut() or <expr>.cast_const()
+ } else if let ExprKind::MethodCall(_, expr, [], _) = e.kind
+ && let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id)
+ && matches!(
+ cx.tcx.get_diagnostic_name(def_id),
+ Some(sym::ptr_cast | sym::const_ptr_cast | sym::ptr_cast_mut | sym::ptr_cast_const)
+ )
+ {
+ had_at_least_one_cast = true;
+ expr
+ // ptr::from_ref(<expr>)
+ } else if let ExprKind::Call(path, [arg]) = e.kind
+ && let ExprKind::Path(ref qpath) = path.kind
+ && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
+ && cx.tcx.is_diagnostic_item(sym::ptr_from_ref, def_id) {
+ return Some(arg);
+ } else if had_at_least_one_cast {
+ return Some(e);
+ } else {
+ return None;
+ };
+ }
+ }
+
+ fn from_transmute<'tcx>(
+ cx: &LateContext<'tcx>,
+ e: &'tcx Expr<'tcx>,
+ ) -> Option<&'tcx Expr<'tcx>> {
+ // mem::transmute::<_, *mut _>(<expr>)
+ if let ExprKind::Call(path, [arg]) = e.kind
&& let ExprKind::Path(ref qpath) = path.kind
&& let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
- && cx.tcx.is_diagnostic_item(sym::ptr_from_ref, def_id) {
- arg
+ && cx.tcx.is_diagnostic_item(sym::transmute, def_id)
+ && let ty::RawPtr(TypeAndMut { mutbl: Mutability::Mut, .. }) = cx.typeck_results().node_type(e.hir_id).kind() {
+ Some(arg)
} else {
- return;
- };
-
- let e = e.peel_blocks();
- if let ty::Ref(..) = cx.typeck_results().node_type(e.hir_id).kind() {
- cx.emit_spanned_lint(INVALID_REFERENCE_CASTING, expr.span, InvalidReferenceCastingDiag);
+ None
}
}
+
+ let Some(e) = from_casts(cx, e).or_else(|| from_transmute(cx, e)) else {
+ return false;
+ };
+
+ let e = e.peel_blocks();
+ matches!(cx.typeck_results().node_type(e.hir_id).kind(), ty::Ref(_, _, Mutability::Not))
}
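
To make the strengthened, now deny-by-default check concrete, here is an illustrative snippet (not part of the patch) showing the two shapes the pass handles: a direct `&mut *cast` borrow, and an assignment through a local that `check_stmt` recorded in the new `casted` map.

```rust
// Illustrative only: both patterns below now trip INVALID_REFERENCE_CASTING.
fn main() {
    let x = 0i32;

    // Recorded by `check_stmt`: a local initialized from a const-to-mut cast.
    let p = &x as *const i32 as *mut i32;
    unsafe {
        *p = 1; // `check_expr` sees the assignment through the tracked local.

        // Caught directly: `&mut *(<&T> as *const _ as *mut _)`.
        let _r = &mut *(&x as *const i32 as *mut i32);
    }
}
```
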
diff --git a/compiler/rustc_lint/src/traits.rs b/compiler/rustc_lint/src/traits.rs
index de1120806..56508a2a6 100644
--- a/compiler/rustc_lint/src/traits.rs
+++ b/compiler/rustc_lint/src/traits.rs
@@ -92,7 +92,7 @@ impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints {
let predicates = cx.tcx.explicit_predicates_of(item.owner_id);
for &(predicate, span) in predicates.predicates {
let ClauseKind::Trait(trait_predicate) = predicate.kind().skip_binder() else {
- continue
+ continue;
};
let def_id = trait_predicate.trait_ref.def_id;
if cx.tcx.lang_items().drop_trait() == Some(def_id) {
@@ -100,9 +100,7 @@ impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints {
if trait_predicate.trait_ref.self_ty().is_impl_trait() {
continue;
}
- let Some(def_id) = cx.tcx.get_diagnostic_item(sym::needs_drop) else {
- return
- };
+ let Some(def_id) = cx.tcx.get_diagnostic_item(sym::needs_drop) else { return };
cx.emit_spanned_lint(
DROP_BOUNDS,
span,
@@ -113,15 +111,11 @@ impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints {
}
fn check_ty(&mut self, cx: &LateContext<'_>, ty: &'tcx hir::Ty<'tcx>) {
- let hir::TyKind::TraitObject(bounds, _lifetime, _syntax) = &ty.kind else {
- return
- };
+ let hir::TyKind::TraitObject(bounds, _lifetime, _syntax) = &ty.kind else { return };
for bound in &bounds[..] {
let def_id = bound.trait_ref.trait_def_id();
if cx.tcx.lang_items().drop_trait() == def_id {
- let Some(def_id) = cx.tcx.get_diagnostic_item(sym::needs_drop) else {
- return
- };
+ let Some(def_id) = cx.tcx.get_diagnostic_item(sym::needs_drop) else { return };
cx.emit_spanned_lint(DYN_DROP, bound.span, DropGlue { tcx: cx.tcx, def_id });
}
}
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index ed4fb9860..1ba746edd 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -17,7 +17,7 @@ use rustc_errors::DiagnosticMessage;
use rustc_hir as hir;
use rustc_hir::{is_range_literal, Expr, ExprKind, Node};
use rustc_middle::ty::layout::{IntegerExt, LayoutOf, SizeSkeleton};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{
self, AdtKind, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt,
};
@@ -560,7 +560,10 @@ fn lint_nan<'tcx>(
let expr = expr.peel_blocks().peel_borrows();
match expr.kind {
ExprKind::Path(qpath) => {
- let Some(def_id) = cx.typeck_results().qpath_res(&qpath, expr.hir_id).opt_def_id() else { return false; };
+ let Some(def_id) = cx.typeck_results().qpath_res(&qpath, expr.hir_id).opt_def_id()
+ else {
+ return false;
+ };
matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::f32_nan | sym::f64_nan))
}
@@ -569,32 +572,36 @@ fn lint_nan<'tcx>(
}
fn eq_ne(
+ cx: &LateContext<'_>,
e: &hir::Expr<'_>,
l: &hir::Expr<'_>,
r: &hir::Expr<'_>,
f: impl FnOnce(Span, Span) -> InvalidNanComparisonsSuggestion,
) -> InvalidNanComparisons {
- let suggestion =
+ // FIXME(#72505): This suggestion can be restored if `f{32,64}::is_nan` is made const.
+ let suggestion = (!cx.tcx.hir().is_inside_const_context(e.hir_id)).then(|| {
if let Some(l_span) = l.span.find_ancestor_inside(e.span) &&
- let Some(r_span) = r.span.find_ancestor_inside(e.span) {
+ let Some(r_span) = r.span.find_ancestor_inside(e.span)
+ {
f(l_span, r_span)
} else {
InvalidNanComparisonsSuggestion::Spanless
- };
+ }
+ });
InvalidNanComparisons::EqNe { suggestion }
}
let lint = match binop.node {
hir::BinOpKind::Eq | hir::BinOpKind::Ne if is_nan(cx, l) => {
- eq_ne(e, l, r, |l_span, r_span| InvalidNanComparisonsSuggestion::Spanful {
+ eq_ne(cx, e, l, r, |l_span, r_span| InvalidNanComparisonsSuggestion::Spanful {
nan_plus_binop: l_span.until(r_span),
float: r_span.shrink_to_hi(),
neg: (binop.node == hir::BinOpKind::Ne).then(|| r_span.shrink_to_lo()),
})
}
hir::BinOpKind::Eq | hir::BinOpKind::Ne if is_nan(cx, r) => {
- eq_ne(e, l, r, |l_span, r_span| InvalidNanComparisonsSuggestion::Spanful {
+ eq_ne(cx, e, l, r, |l_span, r_span| InvalidNanComparisonsSuggestion::Spanful {
nan_plus_binop: l_span.shrink_to_hi().to(r_span),
float: l_span.shrink_to_hi(),
neg: (binop.node == hir::BinOpKind::Ne).then(|| l_span.shrink_to_lo()),
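
For context, a hypothetical trigger for the NaN-comparison lint touched above (not from the patch); with this change the `is_nan()` suggestion is simply omitted when the comparison sits in a const context, since `is_nan` is not yet a const fn.

```rust
// Illustrative trigger for the NaN-comparison lint.
fn main() {
    let x = 0.0_f32;
    if x == f32::NAN {} // always false; suggestion: `x.is_nan()`

    // In a const context the lint still fires, but without the suggestion.
    const ALWAYS_TRUE: bool = 1.0_f64 != f64::NAN;
    let _ = ALWAYS_TRUE;
}
```
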
@@ -805,20 +812,19 @@ pub fn transparent_newtype_field<'a, 'tcx>(
) -> Option<&'a ty::FieldDef> {
let param_env = tcx.param_env(variant.def_id);
variant.fields.iter().find(|field| {
- let field_ty = tcx.type_of(field.did).subst_identity();
+ let field_ty = tcx.type_of(field.did).instantiate_identity();
let is_zst = tcx.layout_of(param_env.and(field_ty)).is_ok_and(|layout| layout.is_zst());
!is_zst
})
}
/// Is type known to be non-null?
-fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
- let tcx = cx.tcx;
+fn ty_is_known_nonnull<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
match ty.kind() {
ty::FnPtr(_) => true,
ty::Ref(..) => true,
ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true,
- ty::Adt(def, substs) if def.repr().transparent() && !def.is_union() => {
+ ty::Adt(def, args) if def.repr().transparent() && !def.is_union() => {
let marked_non_null = nonnull_optimization_guaranteed(tcx, *def);
if marked_non_null {
@@ -832,8 +838,8 @@ fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKi
def.variants()
.iter()
- .filter_map(|variant| transparent_newtype_field(cx.tcx, variant))
- .any(|field| ty_is_known_nonnull(cx, field.ty(tcx, substs), mode))
+ .filter_map(|variant| transparent_newtype_field(tcx, variant))
+ .any(|field| ty_is_known_nonnull(tcx, field.ty(tcx, args), mode))
}
_ => false,
}
@@ -841,15 +847,12 @@ fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKi
/// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type.
/// If the type passed in was not scalar, returns None.
-fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
- let tcx = cx.tcx;
+fn get_nullable_type<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
Some(match *ty.kind() {
- ty::Adt(field_def, field_substs) => {
+ ty::Adt(field_def, field_args) => {
let inner_field_ty = {
- let mut first_non_zst_ty = field_def
- .variants()
- .iter()
- .filter_map(|v| transparent_newtype_field(cx.tcx, v));
+ let mut first_non_zst_ty =
+ field_def.variants().iter().filter_map(|v| transparent_newtype_field(tcx, v));
debug_assert_eq!(
first_non_zst_ty.clone().count(),
1,
@@ -858,9 +861,9 @@ fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'t
first_non_zst_ty
.next_back()
.expect("No non-zst fields in transparent type.")
- .ty(tcx, field_substs)
+ .ty(tcx, field_args)
};
- return get_nullable_type(cx, inner_field_ty);
+ return get_nullable_type(tcx, inner_field_ty);
}
ty::Int(ty) => Ty::new_int(tcx, ty),
ty::Uint(ty) => Ty::new_uint(tcx, ty),
@@ -892,43 +895,44 @@ fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'t
/// `core::ptr::NonNull`, and `#[repr(transparent)]` newtypes.
/// FIXME: This duplicates code in codegen.
pub(crate) fn repr_nullable_ptr<'tcx>(
- cx: &LateContext<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>,
ckind: CItemKind,
) -> Option<Ty<'tcx>> {
- debug!("is_repr_nullable_ptr(cx, ty = {:?})", ty);
- if let ty::Adt(ty_def, substs) = ty.kind() {
+ debug!("is_repr_nullable_ptr(tcx, ty = {:?})", ty);
+ if let ty::Adt(ty_def, args) = ty.kind() {
let field_ty = match &ty_def.variants().raw[..] {
[var_one, var_two] => match (&var_one.fields.raw[..], &var_two.fields.raw[..]) {
- ([], [field]) | ([field], []) => field.ty(cx.tcx, substs),
+ ([], [field]) | ([field], []) => field.ty(tcx, args),
_ => return None,
},
_ => return None,
};
- if !ty_is_known_nonnull(cx, field_ty, ckind) {
+ if !ty_is_known_nonnull(tcx, field_ty, ckind) {
return None;
}
// At this point, the field's type is known to be nonnull and the parent enum is Option-like.
// If the computed size for the field and the enum are different, the nonnull optimization isn't
// being applied (and we've got a problem somewhere).
- let compute_size_skeleton = |t| SizeSkeleton::compute(t, cx.tcx, cx.param_env).unwrap();
+ let compute_size_skeleton = |t| SizeSkeleton::compute(t, tcx, param_env).unwrap();
if !compute_size_skeleton(ty).same_size(compute_size_skeleton(field_ty)) {
bug!("improper_ctypes: Option nonnull optimization not applied?");
}
// Return the nullable type this Option-like enum can be safely represented with.
- let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi;
+ let field_ty_abi = &tcx.layout_of(param_env.and(field_ty)).unwrap().abi;
if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
- match field_ty_scalar.valid_range(cx) {
+ match field_ty_scalar.valid_range(&tcx) {
WrappingRange { start: 0, end }
- if end == field_ty_scalar.size(&cx.tcx).unsigned_int_max() - 1 =>
+ if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 =>
{
- return Some(get_nullable_type(cx, field_ty).unwrap());
+ return Some(get_nullable_type(tcx, field_ty).unwrap());
}
WrappingRange { start: 1, .. } => {
- return Some(get_nullable_type(cx, field_ty).unwrap());
+ return Some(get_nullable_type(tcx, field_ty).unwrap());
}
WrappingRange { start, end } => {
unreachable!("Unhandled start and end range: ({}, {})", start, end)
@@ -960,9 +964,9 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
&self,
cache: &mut FxHashSet<Ty<'tcx>>,
field: &ty::FieldDef,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> FfiResult<'tcx> {
- let field_ty = field.ty(self.cx.tcx, substs);
+ let field_ty = field.ty(self.cx.tcx, args);
let field_ty = self
.cx
.tcx
@@ -978,14 +982,14 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
ty: Ty<'tcx>,
def: ty::AdtDef<'tcx>,
variant: &ty::VariantDef,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> FfiResult<'tcx> {
use FfiResult::*;
let transparent_with_all_zst_fields = if def.repr().transparent() {
if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
// Transparent newtypes have at most one non-ZST field which needs to be checked.
- match self.check_field_type_for_ffi(cache, field, substs) {
+ match self.check_field_type_for_ffi(cache, field, args) {
FfiUnsafe { ty, .. } if ty.is_unit() => (),
r => return r,
}
@@ -1003,7 +1007,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
// We can't completely trust `repr(C)` markings, so make sure the fields are actually safe.
let mut all_phantom = !variant.fields.is_empty();
for field in &variant.fields {
- all_phantom &= match self.check_field_type_for_ffi(cache, &field, substs) {
+ all_phantom &= match self.check_field_type_for_ffi(cache, &field, args) {
FfiSafe => false,
// `()` fields are FFI-safe!
FfiUnsafe { ty, .. } if ty.is_unit() => false,
@@ -1037,7 +1041,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
}
match *ty.kind() {
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
if def.is_box() && matches!(self.mode, CItemKind::Definition) {
if ty.boxed_ty().is_sized(tcx, self.cx.param_env) {
return FfiSafe;
@@ -1100,7 +1104,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
};
}
- self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), substs)
+ self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), args)
}
AdtKind::Enum => {
if def.variants().is_empty() {
@@ -1113,7 +1117,9 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none()
{
// Special-case types like `Option<extern fn()>`.
- if repr_nullable_ptr(self.cx, ty, self.mode).is_none() {
+ if repr_nullable_ptr(self.cx.tcx, self.cx.param_env, ty, self.mode)
+ .is_none()
+ {
return FfiUnsafe {
ty,
reason: fluent::lint_improper_ctypes_enum_repr_reason,
@@ -1141,7 +1147,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
};
}
- match self.check_variant_for_ffi(cache, ty, def, variant, substs) {
+ match self.check_variant_for_ffi(cache, ty, def, variant, args) {
FfiSafe => (),
r => return r,
}
@@ -1376,7 +1382,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
/// For an external ABI function, argument types and the result type are walked to find fn-ptr
/// types that have external ABIs, as these still need to be checked.
fn check_fn(&mut self, def_id: LocalDefId, decl: &'tcx hir::FnDecl<'_>) {
- let sig = self.cx.tcx.fn_sig(def_id).subst_identity();
+ let sig = self.cx.tcx.fn_sig(def_id).instantiate_identity();
let sig = self.cx.tcx.erase_late_bound_regions(sig);
for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) {
@@ -1394,7 +1400,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
/// Check if a function's argument types and result type are "ffi-safe".
fn check_foreign_fn(&mut self, def_id: LocalDefId, decl: &'tcx hir::FnDecl<'_>) {
- let sig = self.cx.tcx.fn_sig(def_id).subst_identity();
+ let sig = self.cx.tcx.fn_sig(def_id).instantiate_identity();
let sig = self.cx.tcx.erase_late_bound_regions(sig);
for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) {
@@ -1407,7 +1413,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
}
fn check_foreign_static(&mut self, id: hir::OwnerId, span: Span) {
- let ty = self.cx.tcx.type_of(id).subst_identity();
+ let ty = self.cx.tcx.type_of(id).instantiate_identity();
self.check_type_for_ffi_and_report_errors(span, ty, true, false);
}
@@ -1513,7 +1519,7 @@ impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions {
self.check_ty_maybe_containing_foreign_fnptr(
cx,
ty,
- cx.tcx.type_of(item.owner_id).subst_identity(),
+ cx.tcx.type_of(item.owner_id).instantiate_identity(),
);
}
// See `check_fn`..
@@ -1538,7 +1544,7 @@ impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions {
self.check_ty_maybe_containing_foreign_fnptr(
cx,
field.ty,
- cx.tcx.type_of(field.def_id).subst_identity(),
+ cx.tcx.type_of(field.def_id).instantiate_identity(),
);
}
@@ -1573,13 +1579,13 @@ declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
- let t = cx.tcx.type_of(it.owner_id).subst_identity();
+ let t = cx.tcx.type_of(it.owner_id).instantiate_identity();
let ty = cx.tcx.erase_regions(t);
let Ok(layout) = cx.layout_of(ty) else { return };
- let Variants::Multiple {
- tag_encoding: TagEncoding::Direct, tag, ref variants, ..
- } = &layout.variants else {
- return
+ let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, ref variants, .. } =
+ &layout.variants
+ else {
+ return;
};
let tag_size = tag.size(&cx.tcx).bytes();
@@ -1693,7 +1699,7 @@ impl InvalidAtomicOrdering {
&& recognized_names.contains(&method_path.ident.name)
&& let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
&& let Some(impl_did) = cx.tcx.impl_of_method(m_def_id)
- && let Some(adt) = cx.tcx.type_of(impl_did).subst_identity().ty_adt_def()
+ && let Some(adt) = cx.tcx.type_of(impl_did).instantiate_identity().ty_adt_def()
// skip extension traits, only lint functions from the standard library
&& cx.tcx.trait_id_of_impl(impl_did).is_none()
&& let parent = cx.tcx.parent(adt.did())
@@ -1752,8 +1758,13 @@ impl InvalidAtomicOrdering {
}
fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
- let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak])
- else {return };
+ let Some((method, args)) = Self::inherent_atomic_method_call(
+ cx,
+ expr,
+ &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak],
+ ) else {
+ return;
+ };
let fail_order_arg = match method {
sym::fetch_update => &args[1],
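
The reformatted `let … else` above guards the same check as before; an illustrative trigger (not from the patch) looks like this, and is rejected by the deny-by-default `invalid_atomic_ordering` lint.

```rust
// Illustrative: the failure ordering of `compare_exchange` may not be
// `Release` or `AcqRel`.
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let a = AtomicUsize::new(0);
    // The last argument is the failure ordering checked by the lint.
    let _ = a.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Release);
}
```
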
diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs
index 5015b751e..6041f8075 100644
--- a/compiler/rustc_lint/src/unused.rs
+++ b/compiler/rustc_lint/src/unused.rs
@@ -94,7 +94,9 @@ declare_lint_pass!(UnusedResults => [UNUSED_MUST_USE, UNUSED_RESULTS]);
impl<'tcx> LateLintPass<'tcx> for UnusedResults {
fn check_stmt(&mut self, cx: &LateContext<'_>, s: &hir::Stmt<'_>) {
- let hir::StmtKind::Semi(mut expr) = s.kind else { return; };
+ let hir::StmtKind::Semi(mut expr) = s.kind else {
+ return;
+ };
let mut expr_is_from_block = false;
while let hir::ExprKind::Block(blk, ..) = expr.kind
@@ -284,22 +286,25 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
}
ty::Adt(def, _) => is_def_must_use(cx, def.did(), span),
ty::Alias(ty::Opaque, ty::AliasTy { def_id: def, .. }) => {
- elaborate(cx.tcx, cx.tcx.explicit_item_bounds(def).subst_identity_iter_copied())
- // We only care about self bounds for the impl-trait
- .filter_only_self()
- .find_map(|(pred, _span)| {
- // We only look at the `DefId`, so it is safe to skip the binder here.
- if let ty::ClauseKind::Trait(ref poly_trait_predicate) =
- pred.kind().skip_binder()
- {
- let def_id = poly_trait_predicate.trait_ref.def_id;
-
- is_def_must_use(cx, def_id, span)
- } else {
- None
- }
- })
- .map(|inner| MustUsePath::Opaque(Box::new(inner)))
+ elaborate(
+ cx.tcx,
+ cx.tcx.explicit_item_bounds(def).instantiate_identity_iter_copied(),
+ )
+ // We only care about self bounds for the impl-trait
+ .filter_only_self()
+ .find_map(|(pred, _span)| {
+ // We only look at the `DefId`, so it is safe to skip the binder here.
+ if let ty::ClauseKind::Trait(ref poly_trait_predicate) =
+ pred.kind().skip_binder()
+ {
+ let def_id = poly_trait_predicate.trait_ref.def_id;
+
+ is_def_must_use(cx, def_id, span)
+ } else {
+ None
+ }
+ })
+ .map(|inner| MustUsePath::Opaque(Box::new(inner)))
}
ty::Dynamic(binders, _, _) => binders.iter().find_map(|predicate| {
if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate.skip_binder()
@@ -409,7 +414,7 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
match path {
MustUsePath::Suppressed => {}
MustUsePath::Boxed(path) => {
- let descr_pre = &format!("{}boxed ", descr_pre);
+ let descr_pre = &format!("{descr_pre}boxed ");
emit_must_use_untranslated(
cx,
path,
@@ -421,7 +426,7 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
);
}
MustUsePath::Opaque(path) => {
- let descr_pre = &format!("{}implementer{} of ", descr_pre, plural_suffix);
+ let descr_pre = &format!("{descr_pre}implementer{plural_suffix} of ");
emit_must_use_untranslated(
cx,
path,
@@ -433,7 +438,7 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
);
}
MustUsePath::TraitObject(path) => {
- let descr_post = &format!(" trait object{}{}", plural_suffix, descr_post);
+ let descr_post = &format!(" trait object{plural_suffix}{descr_post}");
emit_must_use_untranslated(
cx,
path,
@@ -446,7 +451,7 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
}
MustUsePath::TupleElement(elems) => {
for (index, path) in elems {
- let descr_post = &format!(" in tuple element {}", index);
+ let descr_post = &format!(" in tuple element {index}");
emit_must_use_untranslated(
cx,
path,
@@ -459,7 +464,7 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
}
}
MustUsePath::Array(path, len) => {
- let descr_pre = &format!("{}array{} of ", descr_pre, plural_suffix);
+ let descr_pre = &format!("{descr_pre}array{plural_suffix} of ");
emit_must_use_untranslated(
cx,
path,
@@ -648,7 +653,7 @@ trait UnusedDelimLint {
ExprKind::Call(fn_, _params) => fn_,
ExprKind::Cast(expr, _ty) => expr,
ExprKind::Type(expr, _ty) => expr,
- ExprKind::Index(base, _subscript) => base,
+ ExprKind::Index(base, _subscript, _) => base,
_ => break,
};
if !classify::expr_requires_semi_to_be_stmt(innermost) {
@@ -661,6 +666,24 @@ trait UnusedDelimLint {
if !followed_by_block {
return false;
}
+
+ // Check if we need parens for `match &( Struct { field: }) {}`.
+ {
+ let mut innermost = inner;
+ loop {
+ innermost = match &innermost.kind {
+ ExprKind::AddrOf(_, _, expr) => expr,
+ _ => {
+ if parser::contains_exterior_struct_lit(&innermost) {
+ return true;
+ } else {
+ break;
+ }
+ }
+ }
+ }
+ }
+
let mut innermost = inner;
loop {
innermost = match &innermost.kind {
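
The new loop above exists because some parentheses around a borrowed struct literal are load-bearing; a hypothetical example of code that must not be flagged:

```rust
// Illustrative: these parentheses are required, so `unused_parens` must stay quiet.
struct Struct { field: i32 }

fn main() {
    match &(Struct { field: 1 }) {
        // Without the parentheses, the struct literal's `{` would be parsed as
        // the start of the match body.
        _ => {}
    }
}
```
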
@@ -825,7 +848,7 @@ trait UnusedDelimLint {
(value, UnusedDelimsCtx::ReturnValue, false, Some(left), None, true)
}
- Index(_, ref value) => (value, UnusedDelimsCtx::IndexExpr, false, None, None, false),
+ Index(_, ref value, _) => (value, UnusedDelimsCtx::IndexExpr, false, None, None, false),
Assign(_, ref value, _) | AssignOp(.., ref value) => {
(value, UnusedDelimsCtx::AssignedValue, false, None, None, false)
diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs
index 87c542dc2..96c31a90d 100644
--- a/compiler/rustc_lint_defs/src/builtin.rs
+++ b/compiler/rustc_lint_defs/src/builtin.rs
@@ -2847,6 +2847,45 @@ declare_lint! {
}
declare_lint! {
+ /// The `unnameable_test_items` lint detects [`#[test]`][test] functions
+ /// that are not able to be run by the test harness because they are in a
+ /// position where they are not nameable.
+ ///
+ /// [test]: https://doc.rust-lang.org/reference/attributes/testing.html#the-test-attribute
+ ///
+ /// ### Example
+ ///
+ /// ```rust,test
+ /// fn main() {
+ /// #[test]
+ /// fn foo() {
+ /// // This test will not fail because it does not run.
+ /// assert_eq!(1, 2);
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In order for the test harness to run a test, the test function must be
+ /// located in a position where it can be accessed from the crate root.
+ /// This generally means it must be defined in a module, and not anywhere
+ /// else such as inside another function. The compiler previously allowed
+ /// this without an error, so a lint was added as an alert that a test is
+ /// not being run. Whether or not this should be allowed has not yet been
+ /// decided; see [RFC 2471] and [issue #36629].
+ ///
+ /// [RFC 2471]: https://github.com/rust-lang/rfcs/pull/2471#issuecomment-397414443
+ /// [issue #36629]: https://github.com/rust-lang/rust/issues/36629
+ pub UNNAMEABLE_TEST_ITEMS,
+ Warn,
+ "detects an item that cannot be named being marked as `#[test_case]`",
+ report_in_external_macro
+}
+
+declare_lint! {
/// The `useless_deprecated` lint detects deprecation attributes with no effect.
///
/// ### Example
@@ -3316,6 +3355,7 @@ declare_lint_pass! {
// tidy-alphabetical-start
ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE,
AMBIGUOUS_ASSOCIATED_ITEMS,
+ AMBIGUOUS_GLOB_IMPORTS,
AMBIGUOUS_GLOB_REEXPORTS,
ARITHMETIC_OVERFLOW,
ASM_SUB_REGISTER,
@@ -3326,6 +3366,7 @@ declare_lint_pass! {
BYTE_SLICE_IN_PACKED_STRUCT_WITH_DERIVE,
CENUM_IMPL_DROP_CAST,
COHERENCE_LEAK_CHECK,
+ COINDUCTIVE_OVERLAP_IN_COHERENCE,
CONFLICTING_REPR_HINTS,
CONST_EVALUATABLE_UNCHECKED,
CONST_ITEM_MUTATION,
@@ -3400,7 +3441,9 @@ declare_lint_pass! {
UNFULFILLED_LINT_EXPECTATIONS,
UNINHABITED_STATIC,
UNKNOWN_CRATE_TYPES,
+ UNKNOWN_DIAGNOSTIC_ATTRIBUTES,
UNKNOWN_LINTS,
+ UNNAMEABLE_TEST_ITEMS,
UNNAMEABLE_TYPES,
UNREACHABLE_CODE,
UNREACHABLE_PATTERNS,
@@ -3923,7 +3966,6 @@ declare_lint! {
///
/// // in crate B
/// #![feature(non_exhaustive_omitted_patterns_lint)]
- ///
/// match Bar::A {
/// Bar::A => {},
/// #[warn(non_exhaustive_omitted_patterns)]
@@ -4052,12 +4094,12 @@ declare_lint! {
///
/// The compiler disables the automatic implementation if an explicit one
/// exists for given type constructor. The exact rules governing this
- /// are currently unsound, quite subtle, and will be modified in the future.
- /// This change will cause the automatic implementation to be disabled in more
+ /// were previously unsound, quite subtle, and have been recently modified.
+ /// This change caused the automatic implementation to be disabled in more
/// cases, potentially breaking some code.
pub SUSPICIOUS_AUTO_TRAIT_IMPLS,
Warn,
- "the rules governing auto traits will change in the future",
+ "the rules governing auto traits have recently changed resulting in potential breakage",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseSemanticsChange,
reference: "issue #93367 <https://github.com/rust-lang/rust/issues/93367>",
@@ -4084,7 +4126,7 @@ declare_lint! {
///
/// ### Explanation
///
- /// The preferred location for where clauses on associated types in impls
+ /// The preferred location for where clauses on associated types
/// is after the type. However, for most of generic associated types development,
/// it was only accepted before the equals. To provide a transition period and
/// further evaluate this change, both are currently accepted. At some point in
@@ -4380,3 +4422,108 @@ declare_lint! {
"effective visibility of a type is larger than the area in which it can be named",
@feature_gate = sym::type_privacy_lints;
}
+
+declare_lint! {
+ /// The `coinductive_overlap_in_coherence` lint detects impls which are currently
+ /// considered not overlapping, but may be considered to overlap if support for
+ /// coinduction is added to the trait solver.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(coinductive_overlap_in_coherence)]
+ ///
+ /// trait CyclicTrait {}
+ /// impl<T: CyclicTrait> CyclicTrait for T {}
+ ///
+ /// trait Trait {}
+ /// impl<T: CyclicTrait> Trait for T {}
+ /// // conflicting impl with the above
+ /// impl Trait for u8 {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// There are two impls that could satisfy `u8: Trait`: the blanket impl
+ /// for generic `T`, and the direct impl for `u8`. These two impls nominally
+ /// overlap, since we can infer `T = u8` in the former impl, but since the where
+ /// clause `u8: CyclicTrait` would end up resulting in a cycle (since it depends
+ /// on itself), the blanket impl is not considered to hold for `u8`. This will
+ /// change in a future release.
+ pub COINDUCTIVE_OVERLAP_IN_COHERENCE,
+ Warn,
+ "impls that are not considered to overlap may be considered to \
+ overlap in the future",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #114040 <https://github.com/rust-lang/rust/issues/114040>",
+ };
+}
+
+declare_lint! {
+ /// The `unknown_diagnostic_attributes` lint detects unrecognized diagnostic attributes.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(diagnostic_namespace)]
+ /// #[diagnostic::does_not_exist]
+ /// struct Foo;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It is usually a mistake to specify a diagnostic attribute that does not exist. Check
+ /// the spelling, and check the diagnostic attribute listing for the correct name. Also
+ /// consider if you are using an old version of the compiler, and the attribute
+ /// is only available in a newer version.
+ pub UNKNOWN_DIAGNOSTIC_ATTRIBUTES,
+ Warn,
+ "unrecognized diagnostic attribute"
+}
+
+declare_lint! {
+ /// The `ambiguous_glob_imports` lint detects glob imports that should report ambiguity
+ /// errors, but previously didn't do that due to rustc bugs.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ ///
+ /// #![deny(ambiguous_glob_imports)]
+ /// pub fn foo() -> u32 {
+ /// use sub::*;
+ /// C
+ /// }
+ ///
+ /// mod sub {
+ /// mod mod1 { pub const C: u32 = 1; }
+ /// mod mod2 { pub const C: u32 = 2; }
+ ///
+ /// pub use mod1::*;
+ /// pub use mod2::*;
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Previous versions of Rust compiled this code successfully because the
+ /// ambiguity error was lost when resolving `use sub::mod2::*`.
+ ///
+ /// This is a [future-incompatible] lint to transition this to a
+ /// hard error in the future.
+ ///
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub AMBIGUOUS_GLOB_IMPORTS,
+ Warn,
+ "detects certain glob imports that require reporting an ambiguity error",
+ @future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseError,
+ reference: "issue #114095 <https://github.com/rust-lang/rust/issues/114095>",
+ };
+}
diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs
index 5a5031b79..f350957f7 100644
--- a/compiler/rustc_lint_defs/src/lib.rs
+++ b/compiler/rustc_lint_defs/src/lib.rs
@@ -467,6 +467,21 @@ impl<HCX> ToStableHashKey<HCX> for LintId {
}
}
+#[derive(Debug)]
+pub struct AmbiguityErrorDiag {
+ pub msg: String,
+ pub span: Span,
+ pub label_span: Span,
+ pub label_msg: String,
+ pub note_msg: String,
+ pub b1_span: Span,
+ pub b1_note_msg: String,
+ pub b1_help_msgs: Vec<String>,
+ pub b2_span: Span,
+ pub b2_note_msg: String,
+ pub b2_help_msgs: Vec<String>,
+}
+
// This could be a closure, but then implementing derive trait
// becomes hacky (and it gets allocated).
#[derive(Debug)]
@@ -530,6 +545,9 @@ pub enum BuiltinLintDiagnostics {
vis_span: Span,
ident_span: Span,
},
+ AmbiguousGlobImports {
+ diag: AmbiguityErrorDiag,
+ },
AmbiguousGlobReexports {
/// The name for which collision(s) have occurred.
name: String,
@@ -550,6 +568,10 @@ pub enum BuiltinLintDiagnostics {
/// The local binding that shadows the glob reexport.
private_item_span: Span,
},
+ UnusedQualifications {
+ /// The span of the unnecessarily-qualified path to remove.
+ removal_span: Span,
+ },
}
/// Lints that are buffered up early on in the `Session` before the
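
For orientation, an illustrative snippet (not from the patch) of the situation the new `removal_span` payload describes: the span points at the redundant path prefix so the lint's suggestion can delete exactly that text.

```rust
// Illustrative trigger for `unused_qualifications`.
#![warn(unused_qualifications)]

mod items {
    pub fn f() {}
}
use items::f;

fn main() {
    items::f(); // warning: unnecessary qualification; `removal_span` covers `items::`
    f();
}
```
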
diff --git a/compiler/rustc_llvm/build.rs b/compiler/rustc_llvm/build.rs
index b0783d75d..4302b1618 100644
--- a/compiler/rustc_llvm/build.rs
+++ b/compiler/rustc_llvm/build.rs
@@ -12,6 +12,7 @@ const OPTIONAL_COMPONENTS: &[&str] = &[
"avr",
"loongarch",
"m68k",
+ "csky",
"mips",
"powerpc",
"systemz",
@@ -251,8 +252,11 @@ fn main() {
} else if target.contains("windows-gnu") {
println!("cargo:rustc-link-lib=shell32");
println!("cargo:rustc-link-lib=uuid");
- } else if target.contains("netbsd") || target.contains("haiku") || target.contains("darwin") {
+ } else if target.contains("haiku") || target.contains("darwin") {
println!("cargo:rustc-link-lib=z");
+ } else if target.contains("netbsd") {
+ println!("cargo:rustc-link-lib=z");
+ println!("cargo:rustc-link-lib=execinfo");
}
cmd.args(&components);
diff --git a/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
index 87906dee4..d61ec0b64 100644
--- a/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
@@ -103,12 +103,20 @@ fromRust(LLVMRustCounterExprKind Kind) {
}
extern "C" void LLVMRustCoverageWriteFilenamesSectionToBuffer(
- const char* const Filenames[],
+ const char *const Filenames[],
size_t FilenamesLen,
+ const size_t *const Lengths,
+ size_t LengthsLen,
RustStringRef BufferOut) {
+ if (FilenamesLen != LengthsLen) {
+ report_fatal_error(
+ "Mismatched lengths in LLVMRustCoverageWriteFilenamesSectionToBuffer");
+ }
+
SmallVector<std::string,32> FilenameRefs;
+ FilenameRefs.reserve(FilenamesLen);
for (size_t i = 0; i < FilenamesLen; i++) {
- FilenameRefs.push_back(std::string(Filenames[i]));
+ FilenameRefs.emplace_back(Filenames[i], Lengths[i]);
}
auto FilenamesWriter =
coverage::CoverageFilenamesSectionWriter(ArrayRef<std::string>(FilenameRefs));
@@ -153,19 +161,17 @@ extern "C" void LLVMRustCoverageWriteMappingToBuffer(
CoverageMappingWriter.write(OS);
}
-extern "C" LLVMValueRef LLVMRustCoverageCreatePGOFuncNameVar(LLVMValueRef F, const char *FuncName) {
- StringRef FuncNameRef(FuncName);
+extern "C" LLVMValueRef LLVMRustCoverageCreatePGOFuncNameVar(
+ LLVMValueRef F,
+ const char *FuncName,
+ size_t FuncNameLen) {
+ StringRef FuncNameRef(FuncName, FuncNameLen);
return wrap(createPGOFuncNameVar(*cast<Function>(unwrap(F)), FuncNameRef));
}
-extern "C" uint64_t LLVMRustCoverageHashCString(const char *StrVal) {
- StringRef StrRef(StrVal);
- return IndexedInstrProf::ComputeHash(StrRef);
-}
-
extern "C" uint64_t LLVMRustCoverageHashByteArray(
const char *Bytes,
- unsigned NumBytes) {
+ size_t NumBytes) {
StringRef StrRef(Bytes, NumBytes);
return IndexedInstrProf::ComputeHash(StrRef);
}
diff --git a/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h b/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h
index af6f4d5ea..3f2bf2c9b 100644
--- a/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h
+++ b/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h
@@ -15,7 +15,6 @@
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/JSON.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
@@ -92,10 +91,8 @@ enum LLVMRustAttribute {
NoCfCheck = 35,
ShadowCallStack = 36,
AllocSize = 37,
-#if LLVM_VERSION_GE(15, 0)
AllocatedPointer = 38,
AllocAlign = 39,
-#endif
SanitizeSafeStack = 40,
};
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index c43a02724..b566ea496 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -1,5 +1,6 @@
#include <stdio.h>
+#include <iomanip>
#include <vector>
#include <set>
@@ -24,11 +25,11 @@
#if LLVM_VERSION_GE(17, 0)
#include "llvm/Support/VirtualFileSystem.h"
#endif
-#include "llvm/Support/Host.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/FunctionImport.h"
#include "llvm/Transforms/IPO/Internalize.h"
+#include "llvm/Transforms/IPO/LowerTypeTests.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/Utils/AddDiscriminators.h"
#include "llvm/Transforms/Utils/FunctionImportUtils.h"
@@ -104,6 +105,12 @@ extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
#define SUBTARGET_M68K
#endif
+#ifdef LLVM_COMPONENT_CSKY
+#define SUBTARGET_CSKY SUBTARGET(CSKY)
+#else
+#define SUBTARGET_CSKY
+#endif
+
#ifdef LLVM_COMPONENT_MIPS
#define SUBTARGET_MIPS SUBTARGET(Mips)
#else
@@ -158,6 +165,7 @@ extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
SUBTARGET_AARCH64 \
SUBTARGET_AVR \
SUBTARGET_M68K \
+ SUBTARGET_CSKY \
SUBTARGET_MIPS \
SUBTARGET_PPC \
SUBTARGET_SYSTEMZ \
@@ -306,44 +314,53 @@ static size_t getLongestEntryLength(ArrayRef<KV> Table) {
return MaxLen;
}
-extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM, const char* TargetCPU) {
+using PrintBackendInfo = void(void*, const char* Data, size_t Len);
+
+extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM,
+ const char* TargetCPU,
+ PrintBackendInfo Print,
+ void* Out) {
const TargetMachine *Target = unwrap(TM);
const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
const Triple::ArchType HostArch = Triple(sys::getDefaultTargetTriple()).getArch();
const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
+ std::ostringstream Buf;
+
#if LLVM_VERSION_GE(17, 0)
const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getAllProcessorDescriptions();
-#elif defined(LLVM_RUSTLLVM)
- const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getCPUTable();
#else
- printf("Full target CPU help is not supported by this LLVM version.\n\n");
+ Buf << "Full target CPU help is not supported by this LLVM version.\n\n";
SubtargetSubTypeKV TargetCPUKV = { TargetCPU, {{}}, {{}} };
const ArrayRef<SubtargetSubTypeKV> CPUTable = TargetCPUKV;
#endif
unsigned MaxCPULen = getLongestEntryLength(CPUTable);
- printf("Available CPUs for this target:\n");
+ Buf << "Available CPUs for this target:\n";
// Don't print the "native" entry when the user specifies --target with a
// different arch since that could be wrong or misleading.
if (HostArch == TargetArch) {
MaxCPULen = std::max(MaxCPULen, (unsigned) std::strlen("native"));
const StringRef HostCPU = sys::getHostCPUName();
- printf(" %-*s - Select the CPU of the current host (currently %.*s).\n",
- MaxCPULen, "native", (int)HostCPU.size(), HostCPU.data());
+ Buf << " " << std::left << std::setw(MaxCPULen) << "native"
+ << " - Select the CPU of the current host "
+ "(currently " << HostCPU.str() << ").\n";
}
for (auto &CPU : CPUTable) {
// Compare cpu against current target to label the default
if (strcmp(CPU.Key, TargetCPU) == 0) {
- printf(" %-*s - This is the default target CPU"
- " for the current build target (currently %s).",
- MaxCPULen, CPU.Key, Target->getTargetTriple().str().c_str());
+ Buf << " " << std::left << std::setw(MaxCPULen) << CPU.Key
+ << " - This is the default target CPU for the current build target "
+ "(currently " << Target->getTargetTriple().str() << ").";
}
else {
- printf(" %-*s", MaxCPULen, CPU.Key);
+ Buf << " " << CPU.Key;
}
- printf("\n");
+ Buf << "\n";
}
+
+ const auto &BufString = Buf.str();
+ Print(Out, BufString.data(), BufString.size());
}
extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef TM) {
@@ -599,6 +616,8 @@ enum class LLVMRustOptStage {
struct LLVMRustSanitizerOptions {
bool SanitizeAddress;
bool SanitizeAddressRecover;
+ bool SanitizeCFI;
+ bool SanitizeKCFI;
bool SanitizeMemory;
bool SanitizeMemoryRecover;
int SanitizeMemoryTrackOrigins;
@@ -615,6 +634,7 @@ LLVMRustOptimize(
LLVMTargetMachineRef TMRef,
LLVMRustPassBuilderOptLevel OptLevelRust,
LLVMRustOptStage OptStage,
+ bool IsLinkerPluginLTO,
bool NoPrepopulatePasses, bool VerifyIR, bool UseThinLTOBuffers,
bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize,
bool DisableSimplifyLibCalls, bool EmitLifetimeMarkers,
@@ -667,6 +687,7 @@ LLVMRustOptimize(
assert(!PGOUsePath && !PGOSampleUsePath);
PGOOpt = PGOOptions(PGOGenPath, "", "",
#if LLVM_VERSION_GE(17, 0)
+ "",
FS,
#endif
PGOOptions::IRInstr, PGOOptions::NoCSAction,
@@ -675,6 +696,7 @@ LLVMRustOptimize(
assert(!PGOSampleUsePath);
PGOOpt = PGOOptions(PGOUsePath, "", "",
#if LLVM_VERSION_GE(17, 0)
+ "",
FS,
#endif
PGOOptions::IRUse, PGOOptions::NoCSAction,
@@ -682,6 +704,7 @@ LLVMRustOptimize(
} else if (PGOSampleUsePath) {
PGOOpt = PGOOptions(PGOSampleUsePath, "", "",
#if LLVM_VERSION_GE(17, 0)
+ "",
FS,
#endif
PGOOptions::SampleUse, PGOOptions::NoCSAction,
@@ -689,6 +712,7 @@ LLVMRustOptimize(
} else if (DebugInfoForProfiling) {
PGOOpt = PGOOptions("", "", "",
#if LLVM_VERSION_GE(17, 0)
+ "",
FS,
#endif
PGOOptions::NoAction, PGOOptions::NoCSAction,
@@ -722,6 +746,18 @@ LLVMRustOptimize(
std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
OptimizerLastEPCallbacks;
+ if (!IsLinkerPluginLTO
+ && SanitizerOptions && SanitizerOptions->SanitizeCFI
+ && !NoPrepopulatePasses) {
+ PipelineStartEPCallbacks.push_back(
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+ MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
+ /*ImportSummary=*/nullptr,
+ /*DropTypeTests=*/false));
+ }
+ );
+ }
+
if (VerifyIR) {
PipelineStartEPCallbacks.push_back(
[VerifyIR](ModulePassManager &MPM, OptimizationLevel Level) {
@@ -785,9 +821,6 @@ LLVMRustOptimize(
OptimizerLastEPCallbacks.push_back(
[SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
auto CompileKernel = SanitizerOptions->SanitizeKernelAddress;
-#if LLVM_VERSION_LT(15, 0)
- MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
-#endif
AddressSanitizerOptions opts = AddressSanitizerOptions{
CompileKernel,
SanitizerOptions->SanitizeAddressRecover
@@ -1107,9 +1140,15 @@ struct LLVMRustThinLTOData {
// Not 100% sure what these are, but they impact what's internalized and
// what's inlined across modules, I believe.
+#if LLVM_VERSION_GE(18, 0)
+ DenseMap<StringRef, FunctionImporter::ImportMapTy> ImportLists;
+ DenseMap<StringRef, FunctionImporter::ExportSetTy> ExportLists;
+ DenseMap<StringRef, GVSummaryMapTy> ModuleToDefinedGVSummaries;
+#else
StringMap<FunctionImporter::ImportMapTy> ImportLists;
StringMap<FunctionImporter::ExportSetTy> ExportLists;
StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
+#endif
StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
LLVMRustThinLTOData() : Index(/* HaveGVs = */ false) {}
@@ -1350,6 +1389,11 @@ LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
if (WasmCustomSections)
WasmCustomSections->eraseFromParent();
+ // `llvm.ident` named metadata also gets duplicated.
+ auto *llvmIdent = (*MOrErr)->getNamedMetadata("llvm.ident");
+ if (llvmIdent)
+ llvmIdent->eraseFromParent();
+
return MOrErr;
};
bool ClearDSOLocal = clearDSOLocalOnDeclarations(Mod, Target);
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index bb7510b3a..70cdf3d6d 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -1,4 +1,5 @@
#include "LLVMWrapper.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticHandler.h"
#include "llvm/IR/DiagnosticInfo.h"
@@ -111,9 +112,26 @@ extern "C" void LLVMRustSetNormalizedTarget(LLVMModuleRef M,
unwrap(M)->setTargetTriple(Triple::normalize(Triple));
}
-extern "C" void LLVMRustPrintPassTimings() {
- raw_fd_ostream OS(2, false); // stderr.
- TimerGroup::printAll(OS);
+extern "C" const char *LLVMRustPrintPassTimings(size_t *Len) {
+ std::string buf;
+ raw_string_ostream SS(buf);
+ TimerGroup::printAll(SS);
+ SS.flush();
+ *Len = buf.length();
+ char *CStr = (char *)malloc(*Len);
+ memcpy(CStr, buf.c_str(), *Len);
+ return CStr;
+}
+
+extern "C" const char *LLVMRustPrintStatistics(size_t *Len) {
+ std::string buf;
+ raw_string_ostream SS(buf);
+ llvm::PrintStatistics(SS);
+ SS.flush();
+ *Len = buf.length();
+ char *CStr = (char *)malloc(*Len);
+ memcpy(CStr, buf.c_str(), *Len);
+ return CStr;
}
extern "C" LLVMValueRef LLVMRustGetNamedValue(LLVMModuleRef M, const char *Name,
@@ -259,12 +277,10 @@ static Attribute::AttrKind fromRust(LLVMRustAttribute Kind) {
return Attribute::ShadowCallStack;
case AllocSize:
return Attribute::AllocSize;
-#if LLVM_VERSION_GE(15, 0)
case AllocatedPointer:
return Attribute::AllocatedPointer;
case AllocAlign:
return Attribute::AllocAlign;
-#endif
case SanitizeSafeStack:
return Attribute::SafeStack;
}
@@ -322,20 +338,12 @@ extern "C" LLVMAttributeRef LLVMRustCreateStructRetAttr(LLVMContextRef C, LLVMTy
}
extern "C" LLVMAttributeRef LLVMRustCreateElementTypeAttr(LLVMContextRef C, LLVMTypeRef Ty) {
-#if LLVM_VERSION_GE(15, 0)
return wrap(Attribute::get(*unwrap(C), Attribute::ElementType, unwrap(Ty)));
-#else
- report_fatal_error("Should not be needed on LLVM < 15");
-#endif
}
extern "C" LLVMAttributeRef LLVMRustCreateUWTableAttr(LLVMContextRef C, bool Async) {
-#if LLVM_VERSION_LT(15, 0)
- return wrap(Attribute::get(*unwrap(C), Attribute::UWTable));
-#else
return wrap(Attribute::getWithUWTableKind(
*unwrap(C), Async ? UWTableKind::Async : UWTableKind::Sync));
-#endif
}
extern "C" LLVMAttributeRef LLVMRustCreateAllocSizeAttr(LLVMContextRef C, uint32_t ElementSizeArg) {
@@ -348,8 +356,6 @@ extern "C" LLVMAttributeRef LLVMRustCreateAllocSizeAttr(LLVMContextRef C, uint32
));
}
-#if LLVM_VERSION_GE(15, 0)
-
// These values **must** match ffi::AllocKindFlags.
// It _happens_ to match the LLVM values of llvm::AllocFnKind,
// but that's happenstance and we do explicit conversions before
@@ -393,16 +399,10 @@ static llvm::AllocFnKind allocKindFromRust(LLVMRustAllocKindFlags F) {
}
return AFK;
}
-#endif
extern "C" LLVMAttributeRef LLVMRustCreateAllocKindAttr(LLVMContextRef C, uint64_t AllocKindArg) {
-#if LLVM_VERSION_GE(15, 0)
return wrap(Attribute::get(*unwrap(C), Attribute::AllocKind,
static_cast<uint64_t>(allocKindFromRust(static_cast<LLVMRustAllocKindFlags>(AllocKindArg)))));
-#else
- report_fatal_error(
- "allockind attributes are new in LLVM 15 and should not be used on older LLVMs");
-#endif
}
// Simplified representation of `MemoryEffects` across the FFI boundary.
@@ -499,14 +499,9 @@ LLVMRustInlineAsm(LLVMTypeRef Ty, char *AsmString, size_t AsmStringLen,
extern "C" bool LLVMRustInlineAsmVerify(LLVMTypeRef Ty, char *Constraints,
size_t ConstraintsLen) {
-#if LLVM_VERSION_LT(15, 0)
- return InlineAsm::Verify(unwrap<FunctionType>(Ty),
- StringRef(Constraints, ConstraintsLen));
-#else
// llvm::Error converts to true if it is an error.
return !llvm::errorToBool(InlineAsm::verify(
unwrap<FunctionType>(Ty), StringRef(Constraints, ConstraintsLen)));
-#endif
}
typedef DIBuilder *LLVMRustDIBuilderRef;
@@ -1616,17 +1611,6 @@ extern "C" void LLVMRustSetLinkage(LLVMValueRef V,
LLVMSetLinkage(V, fromRust(RustLinkage));
}
-// FIXME: replace with LLVMConstInBoundsGEP2 when bumped minimal version to llvm-14
-extern "C" LLVMValueRef LLVMRustConstInBoundsGEP2(LLVMTypeRef Ty,
- LLVMValueRef ConstantVal,
- LLVMValueRef *ConstantIndices,
- unsigned NumIndices) {
- ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
- NumIndices);
- Constant *Val = unwrap<Constant>(ConstantVal);
- return wrap(ConstantExpr::getInBoundsGetElementPtr(unwrap(Ty), Val, IdxList));
-}
-
extern "C" bool LLVMRustConstIntGetZExtValue(LLVMValueRef CV, uint64_t *value) {
auto C = unwrap<llvm::ConstantInt>(CV);
if (C->getBitWidth() > 64)
@@ -1642,19 +1626,11 @@ extern "C" bool LLVMRustConstInt128Get(LLVMValueRef CV, bool sext, uint64_t *hig
auto C = unwrap<llvm::ConstantInt>(CV);
if (C->getBitWidth() > 128) { return false; }
APInt AP;
-#if LLVM_VERSION_GE(15, 0)
if (sext) {
AP = C->getValue().sext(128);
} else {
AP = C->getValue().zext(128);
}
-#else
- if (sext) {
- AP = C->getValue().sextOrSelf(128);
- } else {
- AP = C->getValue().zextOrSelf(128);
- }
-#endif
*low = AP.getLoBits(64).getZExtValue();
*high = AP.getHiBits(64).getZExtValue();
return true;
@@ -1893,7 +1869,8 @@ extern "C" void LLVMRustContextConfigureDiagnosticHandler(
LLVMContextRef C, LLVMDiagnosticHandlerTy DiagnosticHandlerCallback,
void *DiagnosticHandlerContext, bool RemarkAllPasses,
const char * const * RemarkPasses, size_t RemarkPassesLen,
- const char * RemarkFilePath
+ const char * RemarkFilePath,
+ bool PGOAvailable
) {
class RustDiagnosticHandler final : public DiagnosticHandler {
@@ -1916,12 +1893,19 @@ extern "C" void LLVMRustContextConfigureDiagnosticHandler(
LlvmRemarkStreamer(std::move(LlvmRemarkStreamer)) {}
virtual bool handleDiagnostics(const DiagnosticInfo &DI) override {
- if (this->LlvmRemarkStreamer) {
- if (auto *OptDiagBase = dyn_cast<DiagnosticInfoOptimizationBase>(&DI)) {
- if (OptDiagBase->isEnabled()) {
+ // If this diagnostic is one of the optimization remark kinds, we can check if it's enabled
+ // before emitting it. This can avoid many short-lived allocations when unpacking the
+ // diagnostic and converting its various C++ strings into Rust strings.
+ // FIXME: some diagnostic infos still allocate before we get here, and avoiding that would be
+ // good in the future. That will require changing a few call sites in LLVM.
+ if (auto *OptDiagBase = dyn_cast<DiagnosticInfoOptimizationBase>(&DI)) {
+ if (OptDiagBase->isEnabled()) {
+ if (this->LlvmRemarkStreamer) {
this->LlvmRemarkStreamer->emit(*OptDiagBase);
return true;
}
+ } else {
+ return true;
}
}
if (DiagnosticHandlerCallback) {
@@ -1984,6 +1968,11 @@ extern "C" void LLVMRustContextConfigureDiagnosticHandler(
std::unique_ptr<LLVMRemarkStreamer> LlvmRemarkStreamer;
if (RemarkFilePath != nullptr) {
+ if (PGOAvailable) {
+ // Enable PGO hotness data for remarks, if available
+ unwrap(C)->setDiagnosticsHotnessRequested(true);
+ }
+
std::error_code EC;
RemarkFile = std::make_unique<ToolOutputFile>(
RemarkFilePath,
@@ -2030,16 +2019,7 @@ extern "C" void LLVMRustGetMangledName(LLVMValueRef V, RustStringRef Str) {
Mangler().getNameWithPrefix(OS, GV, true);
}
-// LLVMGetAggregateElement was added in LLVM 15. For earlier LLVM versions just
-// use its implementation.
-#if LLVM_VERSION_LT(15, 0)
-extern "C" LLVMValueRef LLVMGetAggregateElement(LLVMValueRef C, unsigned Idx) {
- return wrap(unwrap<Constant>(C)->getAggregateElement(Idx));
-}
-#endif
-
extern "C" int32_t LLVMRustGetElementTypeArgIndex(LLVMValueRef CallSite) {
-#if LLVM_VERSION_GE(15, 0)
auto *CB = unwrap<CallBase>(CallSite);
switch (CB->getIntrinsicID()) {
case Intrinsic::arm_ldrex:
@@ -2047,10 +2027,20 @@ extern "C" int32_t LLVMRustGetElementTypeArgIndex(LLVMValueRef CallSite) {
case Intrinsic::arm_strex:
return 1;
}
-#endif
return -1;
}
extern "C" bool LLVMRustIsBitcode(char *ptr, size_t len) {
return identify_magic(StringRef(ptr, len)) == file_magic::bitcode;
}
+
+extern "C" bool LLVMRustIsNonGVFunctionPointerTy(LLVMValueRef V) {
+ if (unwrap<Value>(V)->getType()->isPointerTy()) {
+ if (auto *GV = dyn_cast<GlobalValue>(unwrap<Value>(V))) {
+ if (GV->getValueType()->isFunctionTy())
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
diff --git a/compiler/rustc_llvm/llvm-wrapper/SymbolWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/SymbolWrapper.cpp
index 0493d6b05..bf00d11ed 100644
--- a/compiler/rustc_llvm/llvm-wrapper/SymbolWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/SymbolWrapper.cpp
@@ -7,6 +7,7 @@
// * https://github.com/llvm/llvm-project/blob/8ef3e895ad8ab1724e2b87cabad1dacdc7a397a3/llvm/include/llvm/Object/ArchiveWriter.h
// * https://github.com/llvm/llvm-project/blob/8ef3e895ad8ab1724e2b87cabad1dacdc7a397a3/llvm/lib/Object/ArchiveWriter.cpp
+#include "llvm/ADT/SmallString.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Object/ObjectFile.h"
diff --git a/compiler/rustc_llvm/src/lib.rs b/compiler/rustc_llvm/src/lib.rs
index a49ded4fd..eb7096150 100644
--- a/compiler/rustc_llvm/src/lib.rs
+++ b/compiler/rustc_llvm/src/lib.rs
@@ -103,6 +103,14 @@ pub fn initialize_available_targets() {
LLVMInitializeM68kAsmParser
);
init_target!(
+ llvm_component = "csky",
+ LLVMInitializeCSKYTargetInfo,
+ LLVMInitializeCSKYTarget,
+ LLVMInitializeCSKYTargetMC,
+ LLVMInitializeCSKYAsmPrinter,
+ LLVMInitializeCSKYAsmParser
+ );
+ init_target!(
llvm_component = "loongarch",
LLVMInitializeLoongArchTargetInfo,
LLVMInitializeLoongArchTarget,
diff --git a/compiler/rustc_log/src/lib.rs b/compiler/rustc_log/src/lib.rs
index 3cbb2c21e..e7b80c641 100644
--- a/compiler/rustc_log/src/lib.rs
+++ b/compiler/rustc_log/src/lib.rs
@@ -123,7 +123,7 @@ where
return Ok(());
}
let backtrace = std::backtrace::Backtrace::capture();
- writeln!(writer, "stack backtrace: \n{:?}", backtrace)
+ writeln!(writer, "stack backtrace: \n{backtrace:?}")
}
}
diff --git a/compiler/rustc_macros/Cargo.toml b/compiler/rustc_macros/Cargo.toml
index 16c4a8500..17651ce95 100644
--- a/compiler/rustc_macros/Cargo.toml
+++ b/compiler/rustc_macros/Cargo.toml
@@ -8,7 +8,6 @@ proc-macro = true
[dependencies]
synstructure = "0.13.0"
-# FIXME(Nilstrieb): Updating this causes changes in the diagnostics output.
-syn = { version = "=2.0.8", features = ["full"] }
+syn = { version = "2.0.9", features = ["full"] }
proc-macro2 = "1"
quote = "1"
diff --git a/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs b/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
index 2e6e84ad8..e9a5cd9de 100644
--- a/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
+++ b/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
@@ -203,14 +203,18 @@ impl<'a> DiagnosticDeriveVariantBuilder<'a> {
if first && (nested.input.is_empty() || nested.input.peek(Token![,])) {
self.slug.set_once(path.clone(), path.span().unwrap());
first = false;
- return Ok(())
+ return Ok(());
}
first = false;
let Ok(nested) = nested.value() else {
- span_err(nested.input.span().unwrap(), "diagnostic slug must be the first argument").emit();
- return Ok(())
+ span_err(
+ nested.input.span().unwrap(),
+ "diagnostic slug must be the first argument",
+ )
+ .emit();
+ return Ok(());
};
if path.is_ident("code") {
@@ -221,7 +225,9 @@ impl<'a> DiagnosticDeriveVariantBuilder<'a> {
#diag.code(rustc_errors::DiagnosticId::Error(#code.to_string()));
});
} else {
- span_err(path.span().unwrap(), "unknown argument").note("only the `code` parameter is valid after the slug").emit();
+ span_err(path.span().unwrap(), "unknown argument")
+ .note("only the `code` parameter is valid after the slug")
+ .emit();
// consume the buffer so we don't have syntax errors from syn
let _ = nested.parse::<TokenStream>();
diff --git a/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs b/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
index e8dc98691..877e97450 100644
--- a/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
+++ b/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
@@ -188,7 +188,9 @@ impl<'parent, 'a> SubdiagnosticDeriveVariantBuilder<'parent, 'a> {
let mut kind_slugs = vec![];
for attr in self.variant.ast().attrs {
- let Some(SubdiagnosticVariant { kind, slug, no_span }) = SubdiagnosticVariant::from_attr(attr, self)? else {
+ let Some(SubdiagnosticVariant { kind, slug, no_span }) =
+ SubdiagnosticVariant::from_attr(attr, self)?
+ else {
// Some attributes aren't errors - like documentation comments - but also aren't
// subdiagnostics.
continue;
diff --git a/compiler/rustc_macros/src/lib.rs b/compiler/rustc_macros/src/lib.rs
index 904f8eb57..f4593d0fe 100644
--- a/compiler/rustc_macros/src/lib.rs
+++ b/compiler/rustc_macros/src/lib.rs
@@ -7,6 +7,7 @@
#![allow(rustc::default_hash_types)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#![recursion_limit = "128"]
use synstructure::decl_derive;
diff --git a/compiler/rustc_macros/src/newtype.rs b/compiler/rustc_macros/src/newtype.rs
index 415a89b0f..72b47de1a 100644
--- a/compiler/rustc_macros/src/newtype.rs
+++ b/compiler/rustc_macros/src/newtype.rs
@@ -36,7 +36,8 @@ impl Parse for Newtype {
false
}
"max" => {
- let Meta::NameValue(MetaNameValue { value: Expr::Lit(lit), .. }) = &attr.meta else {
+ let Meta::NameValue(MetaNameValue { value: Expr::Lit(lit), .. }) = &attr.meta
+ else {
panic!("#[max = NUMBER] attribute requires max value");
};
@@ -47,7 +48,8 @@ impl Parse for Newtype {
false
}
"debug_format" => {
- let Meta::NameValue(MetaNameValue { value: Expr::Lit(lit), .. }) = &attr.meta else {
+ let Meta::NameValue(MetaNameValue { value: Expr::Lit(lit), .. }) = &attr.meta
+ else {
panic!("#[debug_format = FMT] attribute requires a format");
};
diff --git a/compiler/rustc_macros/src/serialize.rs b/compiler/rustc_macros/src/serialize.rs
index 8d017d149..ba75517d7 100644
--- a/compiler/rustc_macros/src/serialize.rs
+++ b/compiler/rustc_macros/src/serialize.rs
@@ -43,7 +43,7 @@ fn decodable_body(
let ty_name = s.ast().ident.to_string();
let decode_body = match s.variants() {
[] => {
- let message = format!("`{}` has no variants to decode", ty_name);
+ let message = format!("`{ty_name}` has no variants to decode");
quote! {
panic!(#message)
}
@@ -59,14 +59,14 @@ fn decodable_body(
})
.collect();
let message = format!(
- "invalid enum variant tag while decoding `{}`, expected 0..{}",
+ "invalid enum variant tag while decoding `{}`, expected 0..{}, actual {{}}",
ty_name,
variants.len()
);
quote! {
match ::rustc_serialize::Decoder::read_usize(__decoder) {
#match_inner
- _ => panic!(#message),
+ n => panic!(#message, n),
}
}
}
diff --git a/compiler/rustc_metadata/messages.ftl b/compiler/rustc_metadata/messages.ftl
index 13b3dac85..cc58d51be 100644
--- a/compiler/rustc_metadata/messages.ftl
+++ b/compiler/rustc_metadata/messages.ftl
@@ -25,6 +25,9 @@ metadata_conflicting_alloc_error_handler =
metadata_conflicting_global_alloc =
the `#[global_allocator]` in {$other_crate_name} conflicts with global allocator in: {$crate_name}
+metadata_consider_adding_std =
+ consider adding the standard library to the sysroot with `x build library --target {$locator_triple}`
+
metadata_consider_building_std =
consider building the standard library from source with `cargo build -Zbuild-std`
diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs
index b3976d756..fce80ab37 100644
--- a/compiler/rustc_metadata/src/creader.rs
+++ b/compiler/rustc_metadata/src/creader.rs
@@ -15,12 +15,12 @@ use rustc_hir::definitions::Definitions;
use rustc_index::IndexVec;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{self, CrateType, ExternLocation};
-use rustc_session::cstore::ExternCrateSource;
-use rustc_session::cstore::{CrateDepKind, CrateSource, ExternCrate};
+use rustc_session::cstore::{
+ CrateDepKind, CrateSource, ExternCrate, ExternCrateSource, MetadataLoaderDyn,
+};
use rustc_session::lint;
use rustc_session::output::validate_crate_name;
use rustc_session::search_paths::PathKind;
-use rustc_session::Session;
use rustc_span::edition::Edition;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{Span, DUMMY_SP};
@@ -34,6 +34,8 @@ use std::time::Duration;
use std::{cmp, env, iter};
pub struct CStore {
+ metadata_loader: Box<MetadataLoaderDyn>,
+
metas: IndexVec<CrateNum, Option<Box<CrateMetadata>>>,
injected_panic_runtime: Option<CrateNum>,
/// This crate needs an allocator and either provides it itself, or finds it in a dependency.
@@ -262,10 +264,14 @@ impl CStore {
}
}
- pub fn new(sess: &Session) -> CStore {
+ pub fn new(
+ metadata_loader: Box<MetadataLoaderDyn>,
+ local_stable_crate_id: StableCrateId,
+ ) -> CStore {
let mut stable_crate_ids = StableCrateIdMap::default();
- stable_crate_ids.insert(sess.local_stable_crate_id(), LOCAL_CRATE);
+ stable_crate_ids.insert(local_stable_crate_id, LOCAL_CRATE);
CStore {
+ metadata_loader,
// We add an empty entry for LOCAL_CRATE (which maps to zero) in
// order to make array indices in `metas` match with the
// corresponding `CrateNum`. This first entry will always remain
@@ -539,11 +545,13 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> {
(LoadResult::Previous(cnum), None)
} else {
info!("falling back to a load");
- let metadata_loader = self.tcx.metadata_loader(()).borrow();
let mut locator = CrateLocator::new(
self.sess,
- &**metadata_loader,
+ &*self.cstore.metadata_loader,
name,
+ // The all loop is because `--crate-type=rlib --crate-type=rlib` is
+ // legal and produces both inside this type.
+ self.tcx.crate_types().iter().all(|c| *c == CrateType::Rlib),
hash,
extra_filename,
false, // is_host
@@ -687,7 +695,7 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> {
fn inject_panic_runtime(&mut self, krate: &ast::Crate) {
// If we're only compiling an rlib, then there's no need to select a
// panic runtime, so we just skip this section entirely.
- let any_non_rlib = self.sess.crate_types().iter().any(|ct| *ct != CrateType::Rlib);
+ let any_non_rlib = self.tcx.crate_types().iter().any(|ct| *ct != CrateType::Rlib);
if !any_non_rlib {
info!("panic runtime injection skipped, only generating rlib");
return;
@@ -741,7 +749,9 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> {
};
info!("panic runtime not found -- loading {}", name);
- let Some(cnum) = self.resolve_crate(name, DUMMY_SP, CrateDepKind::Implicit) else { return; };
+ let Some(cnum) = self.resolve_crate(name, DUMMY_SP, CrateDepKind::Implicit) else {
+ return;
+ };
let data = self.cstore.get_crate_data(cnum);
// Sanity check the loaded crate to ensure it is indeed a panic runtime
@@ -774,7 +784,9 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> {
self.sess.emit_err(errors::ProfilerBuiltinsNeedsCore);
}
- let Some(cnum) = self.resolve_crate(name, DUMMY_SP, CrateDepKind::Implicit) else { return; };
+ let Some(cnum) = self.resolve_crate(name, DUMMY_SP, CrateDepKind::Implicit) else {
+ return;
+ };
let data = self.cstore.get_crate_data(cnum);
// Sanity check the loaded crate to ensure it is indeed a profiler runtime
@@ -812,7 +824,7 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> {
// At this point we've determined that we need an allocator. Let's see
// if our compilation session actually needs an allocator based on what
// we're emitting.
- let all_rlib = self.sess.crate_types().iter().all(|ct| matches!(*ct, CrateType::Rlib));
+ let all_rlib = self.tcx.crate_types().iter().all(|ct| matches!(*ct, CrateType::Rlib));
if all_rlib {
return;
}
diff --git a/compiler/rustc_metadata/src/dependency_format.rs b/compiler/rustc_metadata/src/dependency_format.rs
index 72b208a71..783d35ac7 100644
--- a/compiler/rustc_metadata/src/dependency_format.rs
+++ b/compiler/rustc_metadata/src/dependency_format.rs
@@ -66,8 +66,7 @@ use rustc_session::cstore::CrateDepKind;
use rustc_session::cstore::LinkagePreference::{self, RequireDynamic, RequireStatic};
pub(crate) fn calculate(tcx: TyCtxt<'_>) -> Dependencies {
- tcx.sess
- .crate_types()
+ tcx.crate_types()
.iter()
.map(|&ty| {
let linkage = calculate_type(tcx, ty);
diff --git a/compiler/rustc_metadata/src/errors.rs b/compiler/rustc_metadata/src/errors.rs
index fca06c0f4..91220629f 100644
--- a/compiler/rustc_metadata/src/errors.rs
+++ b/compiler/rustc_metadata/src/errors.rs
@@ -623,6 +623,7 @@ pub struct CannotFindCrate {
pub is_nightly_build: bool,
pub profiler_runtime: Symbol,
pub locator_triple: TargetTriple,
+ pub is_ui_testing: bool,
}
impl IntoDiagnostic<'_> for CannotFindCrate {
@@ -646,12 +647,19 @@ impl IntoDiagnostic<'_> for CannotFindCrate {
} else {
diag.note(fluent::metadata_target_no_std_support);
}
- // NOTE: this suggests using rustup, even though the user may not have it installed.
- // That's because they could choose to install it; or this may give them a hint which
- // target they need to install from their distro.
+
if self.missing_core {
- diag.help(fluent::metadata_consider_downloading_target);
+ if env!("CFG_RELEASE_CHANNEL") == "dev" && !self.is_ui_testing {
+ // Note: Emits the nicer suggestion only for the dev channel.
+ diag.help(fluent::metadata_consider_adding_std);
+ } else {
+ // NOTE: this suggests using rustup, even though the user may not have it installed.
+ // That's because they could choose to install it; or this may give them a hint which
+ // target they need to install from their distro.
+ diag.help(fluent::metadata_consider_downloading_target);
+ }
}
+
// Suggest using #![no_std]. #[no_core] is unstable and not really supported anyway.
// NOTE: this is a dummy span if `extern crate std` was injected by the compiler.
// If it's not a dummy, that means someone added `extern crate std` explicitly and
diff --git a/compiler/rustc_metadata/src/foreign_modules.rs b/compiler/rustc_metadata/src/foreign_modules.rs
index d1c2f3104..154eb684f 100644
--- a/compiler/rustc_metadata/src/foreign_modules.rs
+++ b/compiler/rustc_metadata/src/foreign_modules.rs
@@ -1,19 +1,28 @@
+use rustc_data_structures::fx::FxIndexMap;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_middle::query::LocalCrate;
use rustc_middle::ty::TyCtxt;
use rustc_session::cstore::ForeignModule;
-pub(crate) fn collect(tcx: TyCtxt<'_>) -> Vec<ForeignModule> {
- let mut modules = Vec::new();
+pub(crate) fn collect(tcx: TyCtxt<'_>, LocalCrate: LocalCrate) -> FxIndexMap<DefId, ForeignModule> {
+ let mut modules = FxIndexMap::default();
+
+ // We need to collect all the `ForeignMod` items, even if they are empty.
for id in tcx.hir().items() {
if !matches!(tcx.def_kind(id.owner_id), DefKind::ForeignMod) {
continue;
}
+
+ let def_id = id.owner_id.to_def_id();
let item = tcx.hir().item(id);
- if let hir::ItemKind::ForeignMod { items, .. } = item.kind {
+
+ if let hir::ItemKind::ForeignMod { abi, items } = item.kind {
let foreign_items = items.iter().map(|it| it.id.owner_id.to_def_id()).collect();
- modules.push(ForeignModule { foreign_items, def_id: id.owner_id.to_def_id() });
+ modules.insert(def_id, ForeignModule { def_id, abi, foreign_items });
}
}
+
modules
}
diff --git a/compiler/rustc_metadata/src/fs.rs b/compiler/rustc_metadata/src/fs.rs
index 238f963ed..2a9662b80 100644
--- a/compiler/rustc_metadata/src/fs.rs
+++ b/compiler/rustc_metadata/src/fs.rs
@@ -56,7 +56,7 @@ pub fn encode_and_write_metadata(tcx: TyCtxt<'_>) -> (EncodedMetadata, bool) {
// Always create a file at `metadata_filename`, even if we have nothing to write to it.
// This simplifies the creation of the output `out_filename` when requested.
- let metadata_kind = tcx.sess.metadata_kind();
+ let metadata_kind = tcx.metadata_kind();
match metadata_kind {
MetadataKind::None => {
std::fs::File::create(&metadata_filename).unwrap_or_else(|err| {
@@ -104,8 +104,8 @@ pub fn encode_and_write_metadata(tcx: TyCtxt<'_>) -> (EncodedMetadata, bool) {
};
// Load metadata back to memory: codegen may need to include it in object files.
- let metadata = EncodedMetadata::from_path(metadata_filename.clone(), metadata_tmpdir)
- .unwrap_or_else(|err| {
+ let metadata =
+ EncodedMetadata::from_path(metadata_filename, metadata_tmpdir).unwrap_or_else(|err| {
tcx.sess.emit_fatal(FailedCreateEncodedMetadata { err });
});
diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs
index a89d7b464..bf6004ba8 100644
--- a/compiler/rustc_metadata/src/locator.rs
+++ b/compiler/rustc_metadata/src/locator.rs
@@ -222,7 +222,7 @@ use rustc_data_structures::owned_slice::slice_owned;
use rustc_data_structures::svh::Svh;
use rustc_errors::{DiagnosticArgValue, FatalError, IntoDiagnosticArg};
use rustc_fs_util::try_canonicalize;
-use rustc_session::config::{self, CrateType};
+use rustc_session::config;
use rustc_session::cstore::{CrateSource, MetadataLoader};
use rustc_session::filesearch::FileSearch;
use rustc_session::search_paths::PathKind;
@@ -305,14 +305,12 @@ impl<'a> CrateLocator<'a> {
sess: &'a Session,
metadata_loader: &'a dyn MetadataLoader,
crate_name: Symbol,
+ is_rlib: bool,
hash: Option<Svh>,
extra_filename: Option<&'a str>,
is_host: bool,
path_kind: PathKind,
) -> CrateLocator<'a> {
- // The all loop is because `--crate-type=rlib --crate-type=rlib` is
- // legal and produces both inside this type.
- let is_rlib = sess.crate_types().iter().all(|c| *c == CrateType::Rlib);
let needs_object_code = sess.opts.output_types.should_codegen();
// If we're producing an rlib, then we don't need object code.
// Or, if we're not producing object code, then we don't need it either
@@ -511,7 +509,7 @@ impl<'a> CrateLocator<'a> {
rlib: self.extract_one(rlibs, CrateFlavor::Rlib, &mut slot)?,
dylib: self.extract_one(dylibs, CrateFlavor::Dylib, &mut slot)?,
};
- Ok(slot.map(|(svh, metadata)| (svh, Library { source, metadata })))
+ Ok(slot.map(|(svh, metadata, _)| (svh, Library { source, metadata })))
}
fn needs_crate_flavor(&self, flavor: CrateFlavor) -> bool {
@@ -535,11 +533,13 @@ impl<'a> CrateLocator<'a> {
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
+ //
+ // The `PathBuf` in `slot` will only be used for diagnostic purposes.
fn extract_one(
&mut self,
m: FxHashMap<PathBuf, PathKind>,
flavor: CrateFlavor,
- slot: &mut Option<(Svh, MetadataBlob)>,
+ slot: &mut Option<(Svh, MetadataBlob, PathBuf)>,
) -> Result<Option<(PathBuf, PathKind)>, CrateError> {
// If we are producing an rlib, and we've already loaded metadata, then
// we should not attempt to discover further crate sources (unless we're
@@ -550,16 +550,9 @@ impl<'a> CrateLocator<'a> {
//
// See also #68149 which provides more detail on why emitting the
// dependency on the rlib is a bad thing.
- //
- // We currently do not verify that these other sources are even in sync,
- // and this is arguably a bug (see #10786), but because reading metadata
- // is quite slow (especially from dylibs) we currently do not read it
- // from the other crate sources.
if slot.is_some() {
if m.is_empty() || !self.needs_crate_flavor(flavor) {
return Ok(None);
- } else if m.len() == 1 {
- return Ok(Some(m.into_iter().next().unwrap()));
}
}
@@ -610,8 +603,7 @@ impl<'a> CrateLocator<'a> {
candidates,
));
}
- err_data = Some(vec![ret.as_ref().unwrap().0.clone()]);
- *slot = None;
+ err_data = Some(vec![slot.take().unwrap().2]);
}
if let Some(candidates) = &mut err_data {
candidates.push(lib);
@@ -644,7 +636,7 @@ impl<'a> CrateLocator<'a> {
continue;
}
}
- *slot = Some((hash, metadata));
+ *slot = Some((hash, metadata, lib.clone()));
ret = Some((lib, kind));
}
@@ -804,25 +796,36 @@ fn get_metadata_section<'p>(
}
// Length of the compressed stream - this allows linkers to pad the section if they want
- let Ok(len_bytes) = <[u8; 4]>::try_from(&buf[header_len..cmp::min(data_start, buf.len())]) else {
- return Err(MetadataError::LoadFailure("invalid metadata length found".to_string()));
+ let Ok(len_bytes) =
+ <[u8; 4]>::try_from(&buf[header_len..cmp::min(data_start, buf.len())])
+ else {
+ return Err(MetadataError::LoadFailure(
+ "invalid metadata length found".to_string(),
+ ));
};
let compressed_len = u32::from_be_bytes(len_bytes) as usize;
// Header is okay -> inflate the actual metadata
- let compressed_bytes = &buf[data_start..(data_start + compressed_len)];
- debug!("inflating {} bytes of compressed metadata", compressed_bytes.len());
- // Assume the decompressed data will be at least the size of the compressed data, so we
- // don't have to grow the buffer as much.
- let mut inflated = Vec::with_capacity(compressed_bytes.len());
- FrameDecoder::new(compressed_bytes).read_to_end(&mut inflated).map_err(|_| {
- MetadataError::LoadFailure(format!(
- "failed to decompress metadata: {}",
- filename.display()
- ))
- })?;
+ let compressed_bytes = buf.slice(|buf| &buf[data_start..(data_start + compressed_len)]);
+ if &compressed_bytes[..cmp::min(METADATA_HEADER.len(), compressed_bytes.len())]
+ == METADATA_HEADER
+ {
+ // The metadata was not actually compressed.
+ compressed_bytes
+ } else {
+ debug!("inflating {} bytes of compressed metadata", compressed_bytes.len());
+ // Assume the decompressed data will be at least the size of the compressed data, so we
+ // don't have to grow the buffer as much.
+ let mut inflated = Vec::with_capacity(compressed_bytes.len());
+ FrameDecoder::new(&*compressed_bytes).read_to_end(&mut inflated).map_err(|_| {
+ MetadataError::LoadFailure(format!(
+ "failed to decompress metadata: {}",
+ filename.display()
+ ))
+ })?;
- slice_owned(inflated, Deref::deref)
+ slice_owned(inflated, Deref::deref)
+ }
}
CrateFlavor::Rmeta => {
// mmap the file, because only a small fraction of it is read.
@@ -878,9 +881,10 @@ fn find_plugin_registrar_impl<'a>(
sess,
metadata_loader,
name,
- None, // hash
- None, // extra_filename
- true, // is_host
+ false, // is_rlib
+ None, // hash
+ None, // extra_filename
+ true, // is_host
PathKind::Crate,
);
@@ -903,7 +907,7 @@ pub fn list_file_metadata(
let flavor = get_flavor_from_path(path);
match get_metadata_section(target, flavor, path, metadata_loader) {
Ok(metadata) => metadata.list_crate_metadata(out),
- Err(msg) => write!(out, "{}\n", msg),
+ Err(msg) => write!(out, "{msg}\n"),
}
}
@@ -1125,6 +1129,7 @@ impl CrateError {
is_nightly_build: sess.is_nightly_build(),
profiler_runtime: Symbol::intern(&sess.opts.unstable_opts.profiler_runtime),
locator_triple: locator.triple,
+ is_ui_testing: sess.opts.unstable_opts.ui_testing,
});
}
}
@@ -1141,6 +1146,7 @@ impl CrateError {
is_nightly_build: sess.is_nightly_build(),
profiler_runtime: Symbol::intern(&sess.opts.unstable_opts.profiler_runtime),
locator_triple: sess.opts.target_triple.clone(),
+ is_ui_testing: sess.opts.unstable_opts.ui_testing,
});
}
}
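
A minimal standalone sketch of the length-prefix handling in `get_metadata_section` above, not the patch's code: a big-endian `u32` length follows the header, and the payload is accepted as-is when it begins with the metadata header again, i.e. when it was written uncompressed. The `HEADER` constant and exact offsets are assumptions for the example.

```rust
// 8-byte stand-in for METADATA_HEADER.
const HEADER: &[u8] = &[b'r', b'u', b's', b't', 0, 0, 0, 8];

fn metadata_payload(buf: &[u8]) -> Result<(&[u8], bool), String> {
    let header_len = HEADER.len();
    let data_start = header_len + 4;
    // Length of the compressed stream, stored big-endian right after the header.
    let len_bytes: [u8; 4] = buf
        .get(header_len..data_start)
        .and_then(|s| s.try_into().ok())
        .ok_or_else(|| "invalid metadata length found".to_string())?;
    let compressed_len = u32::from_be_bytes(len_bytes) as usize;
    let bytes = buf
        .get(data_start..data_start + compressed_len)
        .ok_or_else(|| "metadata payload is truncated".to_string())?;
    // The patch checks for the metadata header here to accept uncompressed metadata;
    // otherwise the real code inflates `bytes` with `FrameDecoder` (elided here).
    let already_uncompressed = bytes.starts_with(HEADER);
    Ok((bytes, already_uncompressed))
}
```
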
diff --git a/compiler/rustc_metadata/src/native_libs.rs b/compiler/rustc_metadata/src/native_libs.rs
index 0dd7b1197..098c411c8 100644
--- a/compiler/rustc_metadata/src/native_libs.rs
+++ b/compiler/rustc_metadata/src/native_libs.rs
@@ -1,15 +1,17 @@
use rustc_ast::{NestedMetaItem, CRATE_NODE_ID};
use rustc_attr as attr;
use rustc_data_structures::fx::FxHashSet;
-use rustc_hir as hir;
-use rustc_hir::def::DefKind;
+use rustc_middle::query::LocalCrate;
use rustc_middle::ty::{List, ParamEnv, ParamEnvAnd, Ty, TyCtxt};
use rustc_session::config::CrateType;
-use rustc_session::cstore::{DllCallingConvention, DllImport, NativeLib, PeImportNameType};
+use rustc_session::cstore::{
+ DllCallingConvention, DllImport, ForeignModule, NativeLib, PeImportNameType,
+};
use rustc_session::parse::feature_err;
use rustc_session::search_paths::PathKind;
use rustc_session::utils::NativeLibKind;
use rustc_session::Session;
+use rustc_span::def_id::{DefId, LOCAL_CRATE};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::spec::abi::Abi;
@@ -50,10 +52,11 @@ fn find_bundled_library(
verbatim: Option<bool>,
kind: NativeLibKind,
has_cfg: bool,
- sess: &Session,
+ tcx: TyCtxt<'_>,
) -> Option<Symbol> {
+ let sess = tcx.sess;
if let NativeLibKind::Static { bundle: Some(true) | None, whole_archive } = kind
- && sess.crate_types().iter().any(|t| matches!(t, &CrateType::Rlib | CrateType::Staticlib))
+ && tcx.crate_types().iter().any(|t| matches!(t, &CrateType::Rlib | CrateType::Staticlib))
&& (sess.opts.unstable_opts.packed_bundled_libs || has_cfg || whole_archive == Some(true))
{
let verbatim = verbatim.unwrap_or(false);
@@ -66,10 +69,12 @@ fn find_bundled_library(
None
}
-pub(crate) fn collect(tcx: TyCtxt<'_>) -> Vec<NativeLib> {
+pub(crate) fn collect(tcx: TyCtxt<'_>, LocalCrate: LocalCrate) -> Vec<NativeLib> {
let mut collector = Collector { tcx, libs: Vec::new() };
- for id in tcx.hir().items() {
- collector.process_item(id);
+ if tcx.sess.opts.unstable_opts.link_directives {
+ for module in tcx.foreign_modules(LOCAL_CRATE).values() {
+ collector.process_module(module);
+ }
}
collector.process_command_line();
collector.libs
@@ -88,29 +93,20 @@ struct Collector<'tcx> {
}
impl<'tcx> Collector<'tcx> {
- fn process_item(&mut self, id: rustc_hir::ItemId) {
- if !matches!(self.tcx.def_kind(id.owner_id), DefKind::ForeignMod) {
- return;
- }
+ fn process_module(&mut self, module: &ForeignModule) {
+ let ForeignModule { def_id, abi, ref foreign_items } = *module;
+ let def_id = def_id.expect_local();
- let it = self.tcx.hir().item(id);
- let hir::ItemKind::ForeignMod { abi, items: foreign_mod_items } = it.kind else {
- return;
- };
+ let sess = self.tcx.sess;
if matches!(abi, Abi::Rust | Abi::RustIntrinsic | Abi::PlatformIntrinsic) {
return;
}
// Process all of the #[link(..)]-style arguments
- let sess = self.tcx.sess;
let features = self.tcx.features();
- if !sess.opts.unstable_opts.link_directives {
- return;
- }
-
- for m in self.tcx.hir().attrs(it.hir_id()).iter().filter(|a| a.has_name(sym::link)) {
+ for m in self.tcx.get_attrs(def_id, sym::link) {
let Some(items) = m.meta_item_list() else {
continue;
};
@@ -340,9 +336,9 @@ impl<'tcx> Collector<'tcx> {
if name.as_str().contains('\0') {
sess.emit_err(errors::RawDylibNoNul { span: name_span });
}
- foreign_mod_items
+ foreign_items
.iter()
- .map(|child_item| {
+ .map(|&child_item| {
self.build_dll_import(
abi,
import_name_type.map(|(import_name_type, _)| import_name_type),
@@ -352,21 +348,12 @@ impl<'tcx> Collector<'tcx> {
.collect()
}
_ => {
- for child_item in foreign_mod_items {
- if self.tcx.def_kind(child_item.id.owner_id).has_codegen_attrs()
- && self
- .tcx
- .codegen_fn_attrs(child_item.id.owner_id)
- .link_ordinal
- .is_some()
+ for &child_item in foreign_items {
+ if self.tcx.def_kind(child_item).has_codegen_attrs()
+ && self.tcx.codegen_fn_attrs(child_item).link_ordinal.is_some()
{
- let link_ordinal_attr = self
- .tcx
- .hir()
- .attrs(child_item.id.owner_id.into())
- .iter()
- .find(|a| a.has_name(sym::link_ordinal))
- .unwrap();
+ let link_ordinal_attr =
+ self.tcx.get_attr(child_item, sym::link_ordinal).unwrap();
sess.emit_err(errors::LinkOrdinalRawDylib {
span: link_ordinal_attr.span,
});
@@ -378,13 +365,13 @@ impl<'tcx> Collector<'tcx> {
};
let kind = kind.unwrap_or(NativeLibKind::Unspecified);
- let filename = find_bundled_library(name, verbatim, kind, cfg.is_some(), sess);
+ let filename = find_bundled_library(name, verbatim, kind, cfg.is_some(), self.tcx);
self.libs.push(NativeLib {
name,
filename,
kind,
cfg,
- foreign_module: Some(it.owner_id.to_def_id()),
+ foreign_module: Some(def_id.to_def_id()),
verbatim,
dll_imports,
});
@@ -456,9 +443,13 @@ impl<'tcx> Collector<'tcx> {
// Add if not found
let new_name: Option<&str> = passed_lib.new_name.as_deref();
let name = Symbol::intern(new_name.unwrap_or(&passed_lib.name));
- let sess = self.tcx.sess;
- let filename =
- find_bundled_library(name, passed_lib.verbatim, passed_lib.kind, false, sess);
+ let filename = find_bundled_library(
+ name,
+ passed_lib.verbatim,
+ passed_lib.kind,
+ false,
+ self.tcx,
+ );
self.libs.push(NativeLib {
name,
filename,
@@ -476,11 +467,11 @@ impl<'tcx> Collector<'tcx> {
}
}
- fn i686_arg_list_size(&self, item: &hir::ForeignItemRef) -> usize {
+ fn i686_arg_list_size(&self, item: DefId) -> usize {
let argument_types: &List<Ty<'_>> = self.tcx.erase_late_bound_regions(
self.tcx
- .type_of(item.id.owner_id)
- .subst_identity()
+ .type_of(item)
+ .instantiate_identity()
.fn_sig(self.tcx)
.inputs()
.map_bound(|slice| self.tcx.mk_type_list(slice)),
@@ -505,8 +496,10 @@ impl<'tcx> Collector<'tcx> {
&self,
abi: Abi,
import_name_type: Option<PeImportNameType>,
- item: &hir::ForeignItemRef,
+ item: DefId,
) -> DllImport {
+ let span = self.tcx.def_span(item);
+
let calling_convention = if self.tcx.sess.target.arch == "x86" {
match abi {
Abi::C { .. } | Abi::Cdecl { .. } => DllCallingConvention::C,
@@ -520,29 +513,29 @@ impl<'tcx> Collector<'tcx> {
DllCallingConvention::Vectorcall(self.i686_arg_list_size(item))
}
_ => {
- self.tcx.sess.emit_fatal(errors::UnsupportedAbiI686 { span: item.span });
+ self.tcx.sess.emit_fatal(errors::UnsupportedAbiI686 { span });
}
}
} else {
match abi {
Abi::C { .. } | Abi::Win64 { .. } | Abi::System { .. } => DllCallingConvention::C,
_ => {
- self.tcx.sess.emit_fatal(errors::UnsupportedAbi { span: item.span });
+ self.tcx.sess.emit_fatal(errors::UnsupportedAbi { span });
}
}
};
- let codegen_fn_attrs = self.tcx.codegen_fn_attrs(item.id.owner_id);
+ let codegen_fn_attrs = self.tcx.codegen_fn_attrs(item);
let import_name_type = codegen_fn_attrs
.link_ordinal
.map_or(import_name_type, |ord| Some(PeImportNameType::Ordinal(ord)));
DllImport {
- name: codegen_fn_attrs.link_name.unwrap_or(item.ident.name),
+ name: codegen_fn_attrs.link_name.unwrap_or(self.tcx.item_name(item)),
import_name_type,
calling_convention,
- span: item.span,
- is_fn: self.tcx.def_kind(item.id.owner_id).is_fn_like(),
+ span,
+ is_fn: self.tcx.def_kind(item).is_fn_like(),
}
}
}
diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs
index b9318aee5..e8f66c36a 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder.rs
@@ -34,7 +34,7 @@ use rustc_session::cstore::{
use rustc_session::Session;
use rustc_span::hygiene::ExpnIndex;
use rustc_span::symbol::{kw, Ident, Symbol};
-use rustc_span::{self, BytePos, ExpnId, Pos, Span, SyntaxContext, DUMMY_SP};
+use rustc_span::{self, BytePos, ExpnId, Pos, Span, SpanData, SyntaxContext, DUMMY_SP};
use proc_macro::bridge::client::ProcMacro;
use std::iter::TrustedLen;
@@ -311,8 +311,10 @@ impl<'a, 'tcx> DecodeContext<'a, 'tcx> {
#[inline]
fn tcx(&self) -> TyCtxt<'tcx> {
let Some(tcx) = self.tcx else {
- bug!("No TyCtxt found for decoding. \
- You need to explicitly pass `(crate_metadata_ref, tcx)` to `decode` instead of just `crate_metadata_ref`.");
+ bug!(
+ "No TyCtxt found for decoding. \
+ You need to explicitly pass `(crate_metadata_ref, tcx)` to `decode` instead of just `crate_metadata_ref`."
+ );
};
tcx
}
@@ -448,8 +450,10 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for SyntaxContext {
let cdata = decoder.cdata();
let Some(sess) = decoder.sess else {
- bug!("Cannot decode SyntaxContext without Session.\
- You need to explicitly pass `(crate_metadata_ref, tcx)` to `decode` instead of just `crate_metadata_ref`.");
+ bug!(
+ "Cannot decode SyntaxContext without Session.\
+ You need to explicitly pass `(crate_metadata_ref, tcx)` to `decode` instead of just `crate_metadata_ref`."
+ );
};
let cname = cdata.root.name();
@@ -470,8 +474,10 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnId {
let local_cdata = decoder.cdata();
let Some(sess) = decoder.sess else {
- bug!("Cannot decode ExpnId without Session. \
- You need to explicitly pass `(crate_metadata_ref, tcx)` to `decode` instead of just `crate_metadata_ref`.");
+ bug!(
+ "Cannot decode ExpnId without Session. \
+ You need to explicitly pass `(crate_metadata_ref, tcx)` to `decode` instead of just `crate_metadata_ref`."
+ );
};
let cnum = CrateNum::decode(decoder);
@@ -507,11 +513,26 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnId {
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span {
fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Span {
+ let mode = SpanEncodingMode::decode(decoder);
+ let data = match mode {
+ SpanEncodingMode::Direct => SpanData::decode(decoder),
+ SpanEncodingMode::Shorthand(position) => decoder.with_position(position, |decoder| {
+ let mode = SpanEncodingMode::decode(decoder);
+ debug_assert!(matches!(mode, SpanEncodingMode::Direct));
+ SpanData::decode(decoder)
+ }),
+ };
+ Span::new(data.lo, data.hi, data.ctxt, data.parent)
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for SpanData {
+ fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> SpanData {
let ctxt = SyntaxContext::decode(decoder);
let tag = u8::decode(decoder);
if tag == TAG_PARTIAL_SPAN {
- return DUMMY_SP.with_ctxt(ctxt);
+ return DUMMY_SP.with_ctxt(ctxt).data();
}
debug_assert!(tag == TAG_VALID_SPAN_LOCAL || tag == TAG_VALID_SPAN_FOREIGN);
@@ -521,8 +542,10 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span {
let hi = lo + len;
let Some(sess) = decoder.sess else {
- bug!("Cannot decode Span without Session. \
- You need to explicitly pass `(crate_metadata_ref, tcx)` to `decode` instead of just `crate_metadata_ref`.")
+ bug!(
+ "Cannot decode Span without Session. \
+ You need to explicitly pass `(crate_metadata_ref, tcx)` to `decode` instead of just `crate_metadata_ref`."
+ )
};
// Index of the file in the corresponding crate's list of encoded files.
@@ -604,7 +627,7 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span {
let hi = hi + source_file.translated_source_file.start_pos;
// Do not try to decode parent for foreign spans.
- Span::new(lo, hi, ctxt, None)
+ SpanData { lo, hi, ctxt, parent: None }
}
}
@@ -819,7 +842,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
.decode((self, sess))
}
- fn load_proc_macro(self, id: DefIndex, sess: &Session) -> SyntaxExtension {
+ fn load_proc_macro(self, id: DefIndex, tcx: TyCtxt<'tcx>) -> SyntaxExtension {
let (name, kind, helper_attrs) = match *self.raw_proc_macro(id) {
ProcMacro::CustomDerive { trait_name, attributes, client } => {
let helper_attrs =
@@ -838,9 +861,11 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
}
};
+ let sess = tcx.sess;
let attrs: Vec<_> = self.get_item_attrs(id, sess).collect();
SyntaxExtension::new(
sess,
+ tcx.features(),
kind,
self.get_span(id, sess),
helper_attrs,
diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
index 848535fb3..aeda8af6d 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
@@ -6,6 +6,7 @@ use crate::rmeta::AttrFlags;
use rustc_ast as ast;
use rustc_attr::Deprecation;
+use rustc_data_structures::sync::Lrc;
use rustc_hir::def::{CtorKind, DefKind, Res};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LOCAL_CRATE};
use rustc_hir::definitions::{DefKey, DefPath, DefPathHash};
@@ -23,7 +24,6 @@ use rustc_span::hygiene::{ExpnHash, ExpnId};
use rustc_span::symbol::{kw, Symbol};
use rustc_span::Span;
-use rustc_data_structures::sync::Lrc;
use std::any::Any;
use super::{Decodable, DecodeContext, DecodeIterator};
@@ -246,6 +246,7 @@ provide! { tcx, def_id, other, cdata,
debug_assert_eq!(tcx.def_kind(def_id), DefKind::OpaqueTy);
cdata.root.tables.is_type_alias_impl_trait.get(cdata, def_id.index)
}
+ assumed_wf_types_for_rpitit => { table }
collect_return_position_impl_trait_in_trait_tys => {
Ok(cdata
.root
@@ -403,10 +404,8 @@ pub(in crate::rmeta) fn provide(providers: &mut Providers) {
.contains(&id)
})
},
- native_libraries: |tcx, LocalCrate| native_libs::collect(tcx),
- foreign_modules: |tcx, LocalCrate| {
- foreign_modules::collect(tcx).into_iter().map(|m| (m.def_id, m)).collect()
- },
+ native_libraries: native_libs::collect,
+ foreign_modules: foreign_modules::collect,
// Returns a map from a sufficiently visible external item (i.e., an
// external item that is visible from at least one local module) to a
@@ -523,12 +522,13 @@ impl CStore {
self.get_crate_data(def.krate).get_ctor(def.index)
}
- pub fn load_macro_untracked(&self, id: DefId, sess: &Session) -> LoadedMacro {
+ pub fn load_macro_untracked(&self, id: DefId, tcx: TyCtxt<'_>) -> LoadedMacro {
+ let sess = tcx.sess;
let _prof_timer = sess.prof.generic_activity("metadata_load_macro");
let data = self.get_crate_data(id.krate);
if data.root.is_proc_macro_crate() {
- return LoadedMacro::ProcMacro(data.load_proc_macro(id.index, sess));
+ return LoadedMacro::ProcMacro(data.load_proc_macro(id.index, tcx));
}
let span = data.get_span(id.index, sess);
diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs
index 541c19c35..be91ad408 100644
--- a/compiler/rustc_metadata/src/rmeta/encoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/encoder.rs
@@ -30,6 +30,7 @@ use rustc_middle::query::Providers;
use rustc_middle::traits::specialization_graph;
use rustc_middle::ty::codec::TyEncoder;
use rustc_middle::ty::fast_reject::{self, SimplifiedType, TreatParams};
+use rustc_middle::ty::TypeVisitableExt;
use rustc_middle::ty::{self, AssocItemContainer, SymbolName, Ty, TyCtxt};
use rustc_middle::util::common::to_readable_str;
use rustc_serialize::{opaque, Decodable, Decoder, Encodable, Encoder};
@@ -37,7 +38,7 @@ use rustc_session::config::{CrateType, OptLevel};
use rustc_session::cstore::{ForeignModule, LinkagePreference, NativeLib};
use rustc_span::hygiene::{ExpnIndex, HygieneEncodeContext, MacroKind};
use rustc_span::symbol::{sym, Symbol};
-use rustc_span::{self, ExternalSource, FileName, SourceFile, Span, SyntaxContext};
+use rustc_span::{self, ExternalSource, FileName, SourceFile, Span, SpanData, SyntaxContext};
use std::borrow::Borrow;
use std::collections::hash_map::Entry;
use std::hash::Hash;
@@ -53,6 +54,7 @@ pub(super) struct EncodeContext<'a, 'tcx> {
tables: TableBuilders,
lazy_state: LazyState,
+ span_shorthands: FxHashMap<Span, usize>,
type_shorthands: FxHashMap<Ty<'tcx>, usize>,
predicate_shorthands: FxHashMap<ty::PredicateKind<'tcx>, usize>,
@@ -177,8 +179,20 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for ExpnId {
impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Span {
fn encode(&self, s: &mut EncodeContext<'a, 'tcx>) {
- let span = self.data();
+ match s.span_shorthands.entry(*self) {
+ Entry::Occupied(o) => SpanEncodingMode::Shorthand(*o.get()).encode(s),
+ Entry::Vacant(v) => {
+ let position = s.opaque.position();
+ v.insert(position);
+ SpanEncodingMode::Direct.encode(s);
+ self.data().encode(s);
+ }
+ }
+ }
+}
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SpanData {
+ fn encode(&self, s: &mut EncodeContext<'a, 'tcx>) {
// Don't serialize any `SyntaxContext`s from a proc-macro crate,
// since we don't load proc-macro dependencies during serialization.
// This means that any hygiene information from macros used *within*
@@ -213,7 +227,7 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Span {
if s.is_proc_macro {
SyntaxContext::root().encode(s);
} else {
- span.ctxt.encode(s);
+ self.ctxt.encode(s);
}
if self.is_dummy() {
@@ -221,18 +235,18 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Span {
}
// The Span infrastructure should make sure that this invariant holds:
- debug_assert!(span.lo <= span.hi);
+ debug_assert!(self.lo <= self.hi);
- if !s.source_file_cache.0.contains(span.lo) {
+ if !s.source_file_cache.0.contains(self.lo) {
let source_map = s.tcx.sess.source_map();
- let source_file_index = source_map.lookup_source_file_idx(span.lo);
+ let source_file_index = source_map.lookup_source_file_idx(self.lo);
s.source_file_cache =
(source_map.files()[source_file_index].clone(), source_file_index);
}
let (ref source_file, source_file_index) = s.source_file_cache;
- debug_assert!(source_file.contains(span.lo));
+ debug_assert!(source_file.contains(self.lo));
- if !source_file.contains(span.hi) {
+ if !source_file.contains(self.hi) {
// Unfortunately, macro expansion still sometimes generates Spans
// that are malformed in this way.
return TAG_PARTIAL_SPAN.encode(s);
@@ -286,11 +300,11 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Span {
// Encode the start position relative to the file start, so we profit more from the
// variable-length integer encoding.
- let lo = span.lo - source_file.start_pos;
+ let lo = self.lo - source_file.start_pos;
// Encode the length, which is usually less than span.hi and profits more
// from the variable-length integer encoding that we use.
- let len = span.hi - span.lo;
+ let len = self.hi - self.lo;
tag.encode(s);
lo.encode(s);
@@ -608,7 +622,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
trace!("encoding {} further alloc ids", new_n - n);
for idx in n..new_n {
let id = self.interpret_allocs[idx];
- let pos = self.position() as u32;
+ let pos = self.position() as u64;
interpret_alloc_index.push(pos);
interpret::specialized_encode_alloc_id(self, tcx, id);
}
@@ -805,7 +819,7 @@ fn should_encode_span(def_kind: DefKind) -> bool {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -824,7 +838,6 @@ fn should_encode_span(def_kind: DefKind) -> bool {
| DefKind::AnonConst
| DefKind::InlineConst
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::Field
| DefKind::Impl { .. }
| DefKind::Closure
@@ -841,7 +854,7 @@ fn should_encode_attrs(def_kind: DefKind) -> bool {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -867,7 +880,6 @@ fn should_encode_attrs(def_kind: DefKind) -> bool {
| DefKind::AnonConst
| DefKind::InlineConst
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::LifetimeParam
| DefKind::GlobalAsm
| DefKind::Generator => false,
@@ -883,7 +895,7 @@ fn should_encode_expn_that_defined(def_kind: DefKind) -> bool {
| DefKind::Variant
| DefKind::Trait
| DefKind::Impl { .. } => true,
- DefKind::TyAlias
+ DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -902,7 +914,6 @@ fn should_encode_expn_that_defined(def_kind: DefKind) -> bool {
| DefKind::AnonConst
| DefKind::InlineConst
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::Field
| DefKind::LifetimeParam
| DefKind::GlobalAsm
@@ -919,7 +930,7 @@ fn should_encode_visibility(def_kind: DefKind) -> bool {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -939,7 +950,6 @@ fn should_encode_visibility(def_kind: DefKind) -> bool {
| DefKind::AnonConst
| DefKind::InlineConst
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::GlobalAsm
| DefKind::Impl { .. }
| DefKind::Closure
@@ -964,9 +974,8 @@ fn should_encode_stability(def_kind: DefKind) -> bool {
| DefKind::Const
| DefKind::Fn
| DefKind::ForeignMod
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::Enum
| DefKind::Union
| DefKind::Impl { .. }
@@ -1026,14 +1035,13 @@ fn should_encode_mir(tcx: TyCtxt<'_>, def_id: LocalDefId) -> (bool, bool) {
}
}
-fn should_encode_variances(def_kind: DefKind) -> bool {
+fn should_encode_variances<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, def_kind: DefKind) -> bool {
match def_kind {
DefKind::Struct
| DefKind::Union
| DefKind::Enum
| DefKind::Variant
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::Fn
| DefKind::Ctor(..)
| DefKind::AssocFn => true,
@@ -1046,7 +1054,6 @@ fn should_encode_variances(def_kind: DefKind) -> bool {
| DefKind::Static(..)
| DefKind::Const
| DefKind::ForeignMod
- | DefKind::TyAlias
| DefKind::Impl { .. }
| DefKind::Trait
| DefKind::TraitAlias
@@ -1060,6 +1067,9 @@ fn should_encode_variances(def_kind: DefKind) -> bool {
| DefKind::Closure
| DefKind::Generator
| DefKind::ExternCrate => false,
+ DefKind::TyAlias { lazy } => {
+ lazy || tcx.type_of(def_id).instantiate_identity().has_opaque_types()
+ }
}
}
@@ -1070,7 +1080,7 @@ fn should_encode_generics(def_kind: DefKind) -> bool {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -1083,7 +1093,6 @@ fn should_encode_generics(def_kind: DefKind) -> bool {
| DefKind::AnonConst
| DefKind::InlineConst
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::Impl { .. }
| DefKind::Field
| DefKind::TyParam
@@ -1111,7 +1120,7 @@ fn should_encode_type(tcx: TyCtxt<'_>, def_id: LocalDefId, def_kind: DefKind) ->
| DefKind::Fn
| DefKind::Const
| DefKind::Static(..)
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::Impl { .. }
| DefKind::AssocFn
@@ -1134,30 +1143,11 @@ fn should_encode_type(tcx: TyCtxt<'_>, def_id: LocalDefId, def_kind: DefKind) ->
}
}
- DefKind::ImplTraitPlaceholder => {
- let parent_def_id = tcx.impl_trait_in_trait_parent_fn(def_id.to_def_id());
- let assoc_item = tcx.associated_item(parent_def_id);
- match assoc_item.container {
- // Always encode an RPIT in an impl fn, since it always has a body
- ty::AssocItemContainer::ImplContainer => true,
- ty::AssocItemContainer::TraitContainer => {
- // Encode an RPIT for a trait only if the trait has a default body
- assoc_item.defaultness(tcx).has_value()
- }
- }
- }
-
DefKind::AssocTy => {
let assoc_item = tcx.associated_item(def_id);
match assoc_item.container {
ty::AssocItemContainer::ImplContainer => true,
- // Always encode RPITITs, since we need to be able to project
- // from an RPITIT associated item to an opaque when installing
- // the default projection predicates in default trait methods
- // with RPITITs.
- ty::AssocItemContainer::TraitContainer => {
- assoc_item.defaultness(tcx).has_value() || assoc_item.opt_rpitit_info.is_some()
- }
+ ty::AssocItemContainer::TraitContainer => assoc_item.defaultness(tcx).has_value(),
}
}
DefKind::TyParam => {
@@ -1190,9 +1180,8 @@ fn should_encode_fn_sig(def_kind: DefKind) -> bool {
| DefKind::Const
| DefKind::Static(..)
| DefKind::Ctor(..)
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::ForeignTy
| DefKind::Impl { .. }
| DefKind::AssocConst
@@ -1232,10 +1221,9 @@ fn should_encode_constness(def_kind: DefKind) -> bool {
| DefKind::AssocConst
| DefKind::AnonConst
| DefKind::Static(..)
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::OpaqueTy
| DefKind::Impl { of_trait: false }
- | DefKind::ImplTraitPlaceholder
| DefKind::ForeignTy
| DefKind::Generator
| DefKind::ConstParam
@@ -1266,9 +1254,8 @@ fn should_encode_const(def_kind: DefKind) -> bool {
| DefKind::Field
| DefKind::Fn
| DefKind::Static(..)
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::ForeignTy
| DefKind::Impl { .. }
| DefKind::AssocFn
@@ -1289,11 +1276,8 @@ fn should_encode_const(def_kind: DefKind) -> bool {
}
}
-// We only encode impl trait in trait when using `lower-impl-trait-in-trait-to-assoc-ty` unstable
-// option.
fn should_encode_fn_impl_trait_in_trait<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
- if tcx.lower_impl_trait_in_trait_to_assoc_ty()
- && let Some(assoc_item) = tcx.opt_associated_item(def_id)
+ if let Some(assoc_item) = tcx.opt_associated_item(def_id)
&& assoc_item.container == ty::AssocItemContainer::TraitContainer
&& assoc_item.kind == ty::AssocKind::Fn
{
@@ -1368,7 +1352,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
self.encode_default_body_stability(def_id);
self.encode_deprecation(def_id);
}
- if should_encode_variances(def_kind) {
+ if should_encode_variances(tcx, def_id, def_kind) {
let v = self.tcx.variances_of(def_id);
record_array!(self.tables.variances_of[def_id] <- v);
}
@@ -1447,9 +1431,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
.is_type_alias_impl_trait
.set(def_id.index, self.tcx.is_type_alias_impl_trait(def_id));
}
- if let DefKind::ImplTraitPlaceholder = def_kind {
- self.encode_explicit_item_bounds(def_id);
- }
if tcx.impl_method_has_trait_impl_trait_tys(def_id)
&& let Ok(table) = self.tcx.collect_return_position_impl_trait_in_trait_tys(def_id)
{
@@ -1576,6 +1557,12 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
if let Some(rpitit_info) = item.opt_rpitit_info {
record!(self.tables.opt_rpitit_info[def_id] <- rpitit_info);
+ if matches!(rpitit_info, ty::ImplTraitInTraitData::Trait { .. }) {
+ record_array!(
+ self.tables.assumed_wf_types_for_rpitit[def_id]
+ <- self.tcx.assumed_wf_types_for_rpitit(def_id)
+ );
+ }
}
}
@@ -1700,7 +1687,9 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
fn encode_info_for_macro(&mut self, def_id: LocalDefId) {
let tcx = self.tcx;
- let hir::ItemKind::Macro(ref macro_def, _) = tcx.hir().expect_item(def_id).kind else { bug!() };
+ let hir::ItemKind::Macro(ref macro_def, _) = tcx.hir().expect_item(def_id).kind else {
+ bug!()
+ };
self.tables.is_macro_rules.set(def_id.local_def_index, macro_def.macro_rules);
record!(self.tables.macro_definition[def_id.to_def_id()] <- &*macro_def.body);
}
@@ -1752,7 +1741,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
fn encode_proc_macros(&mut self) -> Option<ProcMacroData> {
- let is_proc_macro = self.tcx.sess.crate_types().contains(&CrateType::ProcMacro);
+ let is_proc_macro = self.tcx.crate_types().contains(&CrateType::ProcMacro);
if is_proc_macro {
let tcx = self.tcx;
let hir = tcx.hir();
@@ -1940,7 +1929,9 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
FxHashMap::default();
for id in tcx.hir().items() {
- let DefKind::Impl { of_trait } = tcx.def_kind(id.owner_id) else { continue; };
+ let DefKind::Impl { of_trait } = tcx.def_kind(id.owner_id) else {
+ continue;
+ };
let def_id = id.owner_id.to_def_id();
self.tables.defaultness.set_some(def_id.index, tcx.defaultness(def_id));
@@ -1949,7 +1940,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
if of_trait && let Some(trait_ref) = tcx.impl_trait_ref(def_id) {
record!(self.tables.impl_trait_ref[def_id] <- trait_ref);
- let trait_ref = trait_ref.subst_identity();
+ let trait_ref = trait_ref.instantiate_identity();
let simplified_self_ty =
fast_reject::simplify_type(self.tcx, trait_ref.self_ty(), TreatParams::AsCandidateKey);
fx_hash_map
@@ -2207,12 +2198,13 @@ fn encode_metadata_impl(tcx: TyCtxt<'_>, path: &Path) {
feat: tcx.features(),
tables: Default::default(),
lazy_state: LazyState::NoNode,
+ span_shorthands: Default::default(),
type_shorthands: Default::default(),
predicate_shorthands: Default::default(),
source_file_cache,
interpret_allocs: Default::default(),
required_source_files,
- is_proc_macro: tcx.sess.crate_types().contains(&CrateType::ProcMacro),
+ is_proc_macro: tcx.crate_types().contains(&CrateType::ProcMacro),
hygiene_ctxt: &hygiene_ctxt,
symbol_table: Default::default(),
};
@@ -2255,13 +2247,12 @@ pub fn provide(providers: &mut Providers) {
tcx.resolutions(())
.doc_link_resolutions
.get(&def_id)
- .expect("no resolutions for a doc link")
+ .unwrap_or_else(|| span_bug!(tcx.def_span(def_id), "no resolutions for a doc link"))
},
doc_link_traits_in_scope: |tcx, def_id| {
- tcx.resolutions(())
- .doc_link_traits_in_scope
- .get(&def_id)
- .expect("no traits in scope for a doc link")
+ tcx.resolutions(()).doc_link_traits_in_scope.get(&def_id).unwrap_or_else(|| {
+ span_bug!(tcx.def_span(def_id), "no traits in scope for a doc link")
+ })
},
traits: |tcx, LocalCrate| {
let mut traits = Vec::new();
diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs
index 9cffd96f4..a89e235ff 100644
--- a/compiler/rustc_metadata/src/rmeta/mod.rs
+++ b/compiler/rustc_metadata/src/rmeta/mod.rs
@@ -51,7 +51,7 @@ mod encoder;
mod table;
pub(crate) fn rustc_version(cfg_version: &'static str) -> String {
- format!("rustc {}", cfg_version)
+ format!("rustc {cfg_version}")
}
/// Metadata encoding version.
@@ -66,6 +66,12 @@ const METADATA_VERSION: u8 = 8;
/// unsigned integer, and further followed by the rustc version string.
pub const METADATA_HEADER: &[u8] = &[b'r', b'u', b's', b't', 0, 0, 0, METADATA_VERSION];
+#[derive(Encodable, Decodable)]
+enum SpanEncodingMode {
+ Shorthand(usize),
+ Direct,
+}
+
/// A value of type T referred to by its absolute position
/// in the metadata, and which can be decoded lazily.
///
@@ -264,7 +270,7 @@ pub(crate) struct CrateRoot {
traits: LazyArray<DefIndex>,
impls: LazyArray<TraitImpls>,
incoherent_impls: LazyArray<IncoherentImpls>,
- interpret_alloc_index: LazyArray<u32>,
+ interpret_alloc_index: LazyArray<u64>,
proc_macro_data: Option<ProcMacroData>,
tables: LazyTables,
@@ -451,6 +457,7 @@ define_tables! {
trait_impl_trait_tys: Table<DefIndex, LazyValue<FxHashMap<DefId, ty::EarlyBinder<Ty<'static>>>>>,
doc_link_resolutions: Table<DefIndex, LazyValue<DocLinkResMap>>,
doc_link_traits_in_scope: Table<DefIndex, LazyArray<DefId>>,
+ assumed_wf_types_for_rpitit: Table<DefIndex, LazyArray<(Ty<'static>, Span)>>,
}
#[derive(TyEncodable, TyDecodable)]
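
A simplified standalone model of the `SpanEncodingMode` scheme defined above, using assumed names and wire format rather than the real `EncodeContext`: the first occurrence of a span is encoded `Direct` and its byte position recorded in a shorthand table; every repeat is encoded as `Shorthand(position)`, and the decoder jumps back to that position and re-reads the direct copy (hence the `debug_assert!` in the decoder that the record found there is `Direct`).

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Span { lo: u32, hi: u32 }

struct Encoder {
    out: Vec<u8>,
    shorthands: HashMap<Span, usize>, // plays the role of `span_shorthands`
}

impl Encoder {
    fn encode_span(&mut self, span: Span) {
        if let Some(&pos) = self.shorthands.get(&span) {
            // Repeat: emit only a back-reference to the earlier direct encoding.
            self.out.push(1); // Shorthand tag
            self.out.extend_from_slice(&(pos as u64).to_le_bytes());
        } else {
            // First occurrence: remember where it starts, then encode it in full.
            let pos = self.out.len();
            self.shorthands.insert(span, pos);
            self.out.push(0); // Direct tag
            self.out.extend_from_slice(&span.lo.to_le_bytes());
            self.out.extend_from_slice(&span.hi.to_le_bytes());
        }
    }
}
```

The recorded position is the offset of the mode tag itself, which is why the decoder's `with_position` callback first re-decodes a `SpanEncodingMode` before reading the `SpanData`.
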
diff --git a/compiler/rustc_metadata/src/rmeta/table.rs b/compiler/rustc_metadata/src/rmeta/table.rs
index f002d7f97..ea66c770b 100644
--- a/compiler/rustc_metadata/src/rmeta/table.rs
+++ b/compiler/rustc_metadata/src/rmeta/table.rs
@@ -126,7 +126,8 @@ fixed_size_enum! {
( Enum )
( Variant )
( Trait )
- ( TyAlias )
+ ( TyAlias { lazy: false } )
+ ( TyAlias { lazy: true } )
( ForeignTy )
( TraitAlias )
( AssocTy )
@@ -142,7 +143,6 @@ fixed_size_enum! {
( AnonConst )
( InlineConst )
( OpaqueTy )
- ( ImplTraitPlaceholder )
( Field )
( LifetimeParam )
( GlobalAsm )
@@ -324,7 +324,7 @@ impl<T> FixedSizeEncoding for Option<LazyValue<T>> {
impl<T> LazyArray<T> {
#[inline]
fn write_to_bytes_impl(self, b: &mut [u8; 8]) {
- let ([position_bytes, meta_bytes],[])= b.as_chunks_mut::<4>() else { panic!() };
+ let ([position_bytes, meta_bytes], []) = b.as_chunks_mut::<4>() else { panic!() };
let position = self.position.get();
let position: u32 = position.try_into().unwrap();
@@ -347,7 +347,7 @@ impl<T> FixedSizeEncoding for LazyArray<T> {
#[inline]
fn from_bytes(b: &[u8; 8]) -> Self {
- let ([position_bytes, meta_bytes],[])= b.as_chunks::<4>() else { panic!() };
+ let ([position_bytes, meta_bytes], []) = b.as_chunks::<4>() else { panic!() };
if *meta_bytes == [0; 4] {
return Default::default();
}
@@ -366,7 +366,7 @@ impl<T> FixedSizeEncoding for Option<LazyArray<T>> {
#[inline]
fn from_bytes(b: &[u8; 8]) -> Self {
- let ([position_bytes, meta_bytes],[])= b.as_chunks::<4>() else { panic!() };
+ let ([position_bytes, meta_bytes], []) = b.as_chunks::<4>() else { panic!() };
LazyArray::from_bytes_impl(position_bytes, meta_bytes)
}
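
A stable-Rust approximation of the 8-byte table slot that the `as_chunks` calls above split in two (rustc itself uses the unstable `as_chunks_mut::<4>()`, and the precise byte layout lives in `LazyArray::write_to_bytes_impl`): one 4-byte half for the position and one for the metadata, with an all-zero meta half meaning "no entry", matching the `*meta_bytes == [0; 4]` check. The little-endian layout below is an assumption for the example.

```rust
fn write_slot(b: &mut [u8; 8], position: u32, meta: u32) {
    let (position_bytes, meta_bytes) = b.split_at_mut(4);
    position_bytes.copy_from_slice(&position.to_le_bytes());
    meta_bytes.copy_from_slice(&meta.to_le_bytes());
}

fn read_slot(b: &[u8; 8]) -> Option<(u32, u32)> {
    let (position_bytes, meta_bytes) = b.split_at(4);
    let meta = u32::from_le_bytes(meta_bytes.try_into().unwrap());
    if meta == 0 {
        // An all-zero metadata half encodes the default / absent entry.
        return None;
    }
    let position = u32::from_le_bytes(position_bytes.try_into().unwrap());
    Some((position, meta))
}
```
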
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
index 4c238308f..bb8e774ce 100644
--- a/compiler/rustc_middle/Cargo.toml
+++ b/compiler/rustc_middle/Cargo.toml
@@ -13,7 +13,7 @@ gsgdt = "0.1.2"
field-offset = "0.3.5"
measureme = "10.0.0"
polonius-engine = "0.13.0"
-rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_apfloat = "0.2.0"
rustc_arena = { path = "../rustc_arena" }
rustc_ast = { path = "../rustc_ast" }
rustc_attr = { path = "../rustc_attr" }
diff --git a/compiler/rustc_middle/messages.ftl b/compiler/rustc_middle/messages.ftl
index bb7147ac8..108a10b50 100644
--- a/compiler/rustc_middle/messages.ftl
+++ b/compiler/rustc_middle/messages.ftl
@@ -52,6 +52,9 @@ middle_drop_check_overflow =
overflow while adding drop-check rules for {$ty}
.note = overflowed on {$overflow_ty}
+middle_layout_references_error =
+ the type has an unknown layout
+
middle_limit_invalid =
`limit` must be a non-negative integer
.label = {$error_str}
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
index 5a320865c..952c796f5 100644
--- a/compiler/rustc_middle/src/arena.rs
+++ b/compiler/rustc_middle/src/arena.rs
@@ -40,7 +40,6 @@ macro_rules! arena_types {
rustc_data_structures::sync::Lrc<rustc_ast::Crate>,
)>,
[] output_filenames: std::sync::Arc<rustc_session::config::OutputFilenames>,
- [] metadata_loader: rustc_data_structures::steal::Steal<Box<rustc_session::cstore::MetadataLoaderDyn>>,
[] crate_for_resolver: rustc_data_structures::steal::Steal<(rustc_ast::Crate, rustc_ast::AttrVec)>,
[] resolutions: rustc_middle::ty::ResolverGlobalCtxt,
[decode] unsafety_check_result: rustc_middle::mir::UnsafetyCheckResult,
@@ -131,6 +130,7 @@ macro_rules! arena_types {
[] closure_kind_origin: (rustc_span::Span, rustc_middle::hir::place::Place<'tcx>),
[] stripped_cfg_items: rustc_ast::expand::StrippedCfgItem,
[] mod_child: rustc_middle::metadata::ModChild,
+ [] features: rustc_feature::Features,
]);
)
}
diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs
index 2dc5b8969..04c09d334 100644
--- a/compiler/rustc_middle/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -60,7 +60,7 @@ use crate::mir::mono::MonoItem;
use crate::ty::TyCtxt;
use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LocalModDefId, ModDefId, LOCAL_CRATE};
use rustc_hir::definitions::DefPathHash;
use rustc_hir::{HirId, ItemLocalId, OwnerId};
use rustc_query_system::dep_graph::FingerprintStyle;
@@ -371,7 +371,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
if tcx.fingerprint_style(dep_node.kind) == FingerprintStyle::HirId {
let (local_hash, local_id) = Fingerprint::from(dep_node.hash).split();
- let def_path_hash = DefPathHash::new(tcx.sess.local_stable_crate_id(), local_hash);
+ let def_path_hash = DefPathHash::new(tcx.stable_crate_id(LOCAL_CRATE), local_hash);
let def_id = tcx
.def_path_hash_to_def_id(def_path_hash, &mut || {
panic!("Failed to extract HirId: {:?} {}", dep_node.kind, dep_node.hash)
@@ -380,10 +380,60 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
let local_id = local_id
.as_u64()
.try_into()
- .unwrap_or_else(|_| panic!("local id should be u32, found {:?}", local_id));
+ .unwrap_or_else(|_| panic!("local id should be u32, found {local_id:?}"));
Some(HirId { owner: OwnerId { def_id }, local_id: ItemLocalId::from_u32(local_id) })
} else {
None
}
}
}
+
+macro_rules! impl_for_typed_def_id {
+ ($Name:ident, $LocalName:ident) => {
+ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for $Name {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ self.to_def_id().to_fingerprint(tcx)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ self.to_def_id().to_debug_str(tcx)
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ DefId::recover(tcx, dep_node).map($Name::new_unchecked)
+ }
+ }
+
+ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for $LocalName {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ self.to_def_id().to_fingerprint(tcx)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ self.to_def_id().to_debug_str(tcx)
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ LocalDefId::recover(tcx, dep_node).map($LocalName::new_unchecked)
+ }
+ }
+ };
+}
+
+impl_for_typed_def_id! { ModDefId, LocalModDefId }
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
index 0ddbe7d1c..f79ce08b8 100644
--- a/compiler/rustc_middle/src/dep_graph/mod.rs
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -8,7 +8,7 @@ mod dep_node;
pub use rustc_query_system::dep_graph::{
debug::DepNodeFilter, hash_result, DepContext, DepNodeColor, DepNodeIndex,
- SerializedDepNodeIndex, WorkProduct, WorkProductId,
+ SerializedDepNodeIndex, WorkProduct, WorkProductId, WorkProductMap,
};
pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt};
@@ -35,7 +35,7 @@ impl rustc_query_system::dep_graph::DepKind for DepKind {
if let Some(def_id) = node.extract_def_id(tcx) {
write!(f, "{}", tcx.def_path_debug_str(def_id))?;
} else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*node) {
- write!(f, "{}", s)?;
+ write!(f, "{s}")?;
} else {
write!(f, "{}", node.hash)?;
}
diff --git a/compiler/rustc_middle/src/error.rs b/compiler/rustc_middle/src/error.rs
index 57b2de84b..b346cd453 100644
--- a/compiler/rustc_middle/src/error.rs
+++ b/compiler/rustc_middle/src/error.rs
@@ -132,6 +132,9 @@ pub enum LayoutError<'tcx> {
#[diag(middle_cycle)]
Cycle,
+
+ #[diag(middle_layout_references_error)]
+ ReferencesError,
}
#[derive(Diagnostic)]
diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs
index 5f2eb890c..467962b39 100644
--- a/compiler/rustc_middle/src/hir/map/mod.rs
+++ b/compiler/rustc_middle/src/hir/map/mod.rs
@@ -8,7 +8,7 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::{par_for_each_in, DynSend, DynSync};
use rustc_hir::def::{DefKind, Res};
-use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId, LOCAL_CRATE};
use rustc_hir::definitions::{DefKey, DefPath, DefPathData, DefPathHash};
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::*;
@@ -24,7 +24,7 @@ pub fn associated_body(node: Node<'_>) -> Option<(LocalDefId, BodyId)> {
match node {
Node::Item(Item {
owner_id,
- kind: ItemKind::Const(_, body) | ItemKind::Static(.., body) | ItemKind::Fn(.., body),
+ kind: ItemKind::Const(_, _, body) | ItemKind::Static(.., body) | ItemKind::Fn(.., body),
..
})
| Node::TraitItem(TraitItem {
@@ -148,7 +148,7 @@ impl<'hir> Map<'hir> {
}
#[inline]
- pub fn module_items(self, module: LocalDefId) -> impl Iterator<Item = ItemId> + 'hir {
+ pub fn module_items(self, module: LocalModDefId) -> impl Iterator<Item = ItemId> + 'hir {
self.tcx.hir_module_items(module).items()
}
@@ -169,8 +169,8 @@ impl<'hir> Map<'hir> {
}
#[inline]
- pub fn local_def_id_to_hir_id(self, def_id: LocalDefId) -> HirId {
- self.tcx.local_def_id_to_hir_id(def_id)
+ pub fn local_def_id_to_hir_id(self, def_id: impl Into<LocalDefId>) -> HirId {
+ self.tcx.local_def_id_to_hir_id(def_id.into())
}
/// Do not call this function directly. The query should be called.
@@ -195,14 +195,10 @@ impl<'hir> Map<'hir> {
ItemKind::Fn(..) => DefKind::Fn,
ItemKind::Macro(_, macro_kind) => DefKind::Macro(macro_kind),
ItemKind::Mod(..) => DefKind::Mod,
- ItemKind::OpaqueTy(ref opaque) => {
- if opaque.in_trait && !self.tcx.lower_impl_trait_in_trait_to_assoc_ty() {
- DefKind::ImplTraitPlaceholder
- } else {
- DefKind::OpaqueTy
- }
+ ItemKind::OpaqueTy(..) => DefKind::OpaqueTy,
+ ItemKind::TyAlias(..) => {
+ DefKind::TyAlias { lazy: self.tcx.features().lazy_type_alias }
}
- ItemKind::TyAlias(..) => DefKind::TyAlias,
ItemKind::Enum(..) => DefKind::Enum,
ItemKind::Struct(..) => DefKind::Struct,
ItemKind::Union(..) => DefKind::Union,
@@ -533,20 +529,20 @@ impl<'hir> Map<'hir> {
self.krate_attrs().iter().any(|attr| attr.has_name(sym::rustc_coherence_is_core))
}
- pub fn get_module(self, module: LocalDefId) -> (&'hir Mod<'hir>, Span, HirId) {
- let hir_id = HirId::make_owner(module);
+ pub fn get_module(self, module: LocalModDefId) -> (&'hir Mod<'hir>, Span, HirId) {
+ let hir_id = HirId::make_owner(module.to_local_def_id());
match self.tcx.hir_owner(hir_id.owner).map(|o| o.node) {
Some(OwnerNode::Item(&Item { span, kind: ItemKind::Mod(ref m), .. })) => {
(m, span, hir_id)
}
Some(OwnerNode::Crate(item)) => (item, item.spans.inner_span, hir_id),
- node => panic!("not a module: {:?}", node),
+ node => panic!("not a module: {node:?}"),
}
}
/// Walks the contents of the local crate. See also `visit_all_item_likes_in_crate`.
pub fn walk_toplevel_module(self, visitor: &mut impl Visitor<'hir>) {
- let (top_mod, span, hir_id) = self.get_module(CRATE_DEF_ID);
+ let (top_mod, span, hir_id) = self.get_module(LocalModDefId::CRATE_DEF_ID);
visitor.visit_mod(top_mod, span, hir_id);
}
@@ -599,7 +595,7 @@ impl<'hir> Map<'hir> {
/// This method is the equivalent of `visit_all_item_likes_in_crate` but restricted to
/// item-likes in a single module.
- pub fn visit_item_likes_in_module<V>(self, module: LocalDefId, visitor: &mut V)
+ pub fn visit_item_likes_in_module<V>(self, module: LocalModDefId, visitor: &mut V)
where
V: Visitor<'hir>,
{
@@ -622,17 +618,19 @@ impl<'hir> Map<'hir> {
}
}
- pub fn for_each_module(self, mut f: impl FnMut(LocalDefId)) {
+ pub fn for_each_module(self, mut f: impl FnMut(LocalModDefId)) {
let crate_items = self.tcx.hir_crate_items(());
for module in crate_items.submodules.iter() {
- f(module.def_id)
+ f(LocalModDefId::new_unchecked(module.def_id))
}
}
#[inline]
- pub fn par_for_each_module(self, f: impl Fn(LocalDefId) + DynSend + DynSync) {
+ pub fn par_for_each_module(self, f: impl Fn(LocalModDefId) + DynSend + DynSync) {
let crate_items = self.tcx.hir_crate_items(());
- par_for_each_in(&crate_items.submodules[..], |module| f(module.def_id))
+ par_for_each_in(&crate_items.submodules[..], |module| {
+ f(LocalModDefId::new_unchecked(module.def_id))
+ })
}
/// Returns an iterator for the nodes in the ancestor tree of the `current_id`
@@ -741,17 +739,6 @@ impl<'hir> Map<'hir> {
}
}
- /// Returns the `OwnerId` of `id`'s nearest module parent, or `id` itself if no
- /// module parent is in this map.
- pub(super) fn get_module_parent_node(self, hir_id: HirId) -> OwnerId {
- for (def_id, node) in self.parent_owner_iter(hir_id) {
- if let OwnerNode::Item(&Item { kind: ItemKind::Mod(_), .. }) = node {
- return def_id;
- }
- }
- CRATE_OWNER_ID
- }
-
/// When on an if expression, a match arm tail expression or a match arm, give back
/// the enclosing `if` or `match` expression.
///
@@ -1109,6 +1096,33 @@ impl<'hir> Map<'hir> {
_ => None,
}
}
+
+ pub fn maybe_get_struct_pattern_shorthand_field(&self, expr: &Expr<'_>) -> Option<Symbol> {
+ let local = match expr {
+ Expr {
+ kind:
+ ExprKind::Path(QPath::Resolved(
+ None,
+ Path {
+ res: def::Res::Local(_), segments: [PathSegment { ident, .. }], ..
+ },
+ )),
+ ..
+ } => Some(ident),
+ _ => None,
+ }?;
+
+ match self.find_parent(expr.hir_id)? {
+ Node::ExprField(field) => {
+ if field.ident.name == local.name && field.is_shorthand {
+ return Some(local.name);
+ }
+ }
+ _ => {}
+ }
+
+ None
+ }
}
impl<'hir> intravisit::Map<'hir> for Map<'hir> {
@@ -1198,7 +1212,7 @@ pub(super) fn crate_hash(tcx: TyCtxt<'_>, _: LocalCrate) -> Svh {
owner_spans.hash_stable(&mut hcx, &mut stable_hasher);
}
tcx.sess.opts.dep_tracking_hash(true).hash_stable(&mut hcx, &mut stable_hasher);
- tcx.sess.local_stable_crate_id().hash_stable(&mut hcx, &mut stable_hasher);
+ tcx.stable_crate_id(LOCAL_CRATE).hash_stable(&mut hcx, &mut stable_hasher);
// Hash visibility information since it does not appear in HIR.
resolutions.visibilities.hash_stable(&mut hcx, &mut stable_hasher);
resolutions.has_pub_restricted.hash_stable(&mut hcx, &mut stable_hasher);
@@ -1312,7 +1326,7 @@ fn hir_id_to_string(map: Map<'_>, id: HirId) -> String {
}
}
-pub(super) fn hir_module_items(tcx: TyCtxt<'_>, module_id: LocalDefId) -> ModuleItems {
+pub(super) fn hir_module_items(tcx: TyCtxt<'_>, module_id: LocalModDefId) -> ModuleItems {
let mut collector = ItemCollector::new(tcx, false);
let (hir_mod, span, hir_id) = tcx.hir().get_module(module_id);
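
As an illustrative aside (not part of the patch): the new `maybe_get_struct_pattern_shorthand_field` helper reports a field name only when a struct literal uses field-init shorthand, i.e. the field expression is just a local with the same name. A minimal surface-Rust sketch of the two spellings it distinguishes:

    struct Point { x: i32, y: i32 }

    fn main() {
        let x = 1;
        // Shorthand: the field expression is the bare local `x`,
        // so the helper would return `Some` with the name `x`.
        let a = Point { x, y: 2 };
        // Written out explicitly: `is_shorthand` is false,
        // so the helper would return `None` here.
        let b = Point { x: x, y: 2 };
        assert_eq!((a.x, b.x), (1, 1));
    }
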
diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs
index 45a07fdd2..0da8fe9cc 100644
--- a/compiler/rustc_middle/src/hir/mod.rs
+++ b/compiler/rustc_middle/src/hir/mod.rs
@@ -11,7 +11,7 @@ use crate::ty::{EarlyBinder, ImplSubject, TyCtxt};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{par_for_each_in, DynSend, DynSync};
use rustc_hir::def::DefKind;
-use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId};
use rustc_hir::*;
use rustc_query_system::ich::StableHashingContext;
use rustc_span::{ExpnId, DUMMY_SP};
@@ -101,8 +101,22 @@ impl<'tcx> TyCtxt<'tcx> {
map::Map { tcx: self }
}
- pub fn parent_module(self, id: HirId) -> LocalDefId {
- self.parent_module_from_def_id(id.owner.def_id)
+ pub fn parent_module(self, id: HirId) -> LocalModDefId {
+ if !id.is_owner() && self.def_kind(id.owner) == DefKind::Mod {
+ LocalModDefId::new_unchecked(id.owner.def_id)
+ } else {
+ self.parent_module_from_def_id(id.owner.def_id)
+ }
+ }
+
+ pub fn parent_module_from_def_id(self, mut id: LocalDefId) -> LocalModDefId {
+ while let Some(parent) = self.opt_local_parent(id) {
+ id = parent;
+ if self.def_kind(id) == DefKind::Mod {
+ break;
+ }
+ }
+ LocalModDefId::new_unchecked(id)
}
pub fn impl_subject(self, def_id: DefId) -> EarlyBinder<ImplSubject<'tcx>> {
@@ -120,10 +134,6 @@ impl<'tcx> TyCtxt<'tcx> {
}
pub fn provide(providers: &mut Providers) {
- providers.parent_module_from_def_id = |tcx, id| {
- let hir = tcx.hir();
- hir.get_module_parent_node(hir.local_def_id_to_hir_id(id)).def_id
- };
providers.hir_crate_items = map::hir_crate_items;
providers.crate_hash = map::crate_hash;
providers.hir_module_items = map::hir_module_items;
@@ -154,18 +164,15 @@ pub fn provide(providers: &mut Providers) {
tcx.hir_crate(()).owners[id.def_id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs)
};
providers.def_span = |tcx, def_id| {
- let def_id = def_id;
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
tcx.hir().opt_span(hir_id).unwrap_or(DUMMY_SP)
};
providers.def_ident_span = |tcx, def_id| {
- let def_id = def_id;
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
tcx.hir().opt_ident_span(hir_id)
};
- providers.fn_arg_names = |tcx, id| {
+ providers.fn_arg_names = |tcx, def_id| {
let hir = tcx.hir();
- let def_id = id;
let hir_id = hir.local_def_id_to_hir_id(def_id);
if let Some(body_id) = hir.maybe_body_owned_by(def_id) {
tcx.arena.alloc_from_iter(hir.body_param_names(body_id))
@@ -180,7 +187,7 @@ pub fn provide(providers: &mut Providers) {
{
idents
} else {
- span_bug!(hir.span(hir_id), "fn_arg_names: unexpected item {:?}", id);
+ span_bug!(hir.span(hir_id), "fn_arg_names: unexpected item {:?}", def_id);
}
};
providers.opt_def_kind = |tcx, def_id| tcx.hir().opt_def_kind(def_id);
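
Purely for illustration (not part of the patch): the inlined `parent_module_from_def_id` no longer goes through the HIR map; it walks `opt_local_parent` upward until it reaches a `DefKind::Mod`, and the crate root, which has no parent, is itself a module. A standalone sketch of that walk over a toy parent table (all types here are simplified stand-ins, not rustc's):

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum DefKind { Mod, Fn, Struct }

    /// Walk parent links upward from `id` until a module is found.
    /// The crate root (id 0 here) has no parent and is itself a module.
    fn parent_module(
        mut id: u32,
        parents: &HashMap<u32, u32>,
        kinds: &HashMap<u32, DefKind>,
    ) -> u32 {
        while let Some(&parent) = parents.get(&id) {
            id = parent;
            if kinds[&id] == DefKind::Mod {
                break;
            }
        }
        id
    }

    fn main() {
        // crate root (0) -> mod a (1) -> fn f (2)
        let parents = HashMap::from([(1, 0), (2, 1)]);
        let kinds = HashMap::from([(0, DefKind::Mod), (1, DefKind::Mod), (2, DefKind::Fn)]);
        assert_eq!(parent_module(2, &parents, &kinds), 1);
        assert_eq!(parent_module(1, &parents, &kinds), 0);
    }
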
diff --git a/compiler/rustc_middle/src/hir/place.rs b/compiler/rustc_middle/src/hir/place.rs
index 8a22de931..32f3a1775 100644
--- a/compiler/rustc_middle/src/hir/place.rs
+++ b/compiler/rustc_middle/src/hir/place.rs
@@ -36,6 +36,10 @@ pub enum ProjectionKind {
/// A subslice covering a range of values like `B[x..y]`.
Subslice,
+
+ /// A conversion from an opaque type to its hidden type so we can
+ /// do further projections on it.
+ OpaqueCast,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs
index d5e8330b3..81823118a 100644
--- a/compiler/rustc_middle/src/infer/canonical.rs
+++ b/compiler/rustc_middle/src/infer/canonical.rs
@@ -23,7 +23,7 @@
use crate::infer::MemberConstraint;
use crate::mir::ConstraintCategory;
-use crate::ty::subst::GenericArg;
+use crate::ty::GenericArg;
use crate::ty::{self, BoundVar, List, Region, Ty, TyCtxt};
use rustc_macros::HashStable;
use smallvec::SmallVec;
@@ -63,7 +63,7 @@ impl<'tcx> ty::TypeFoldable<TyCtxt<'tcx>> for CanonicalVarInfos<'tcx> {
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable)]
#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct CanonicalVarValues<'tcx> {
- pub var_values: ty::SubstsRef<'tcx>,
+ pub var_values: ty::GenericArgsRef<'tcx>,
}
impl CanonicalVarValues<'_> {
@@ -429,7 +429,7 @@ impl<'tcx> CanonicalVarValues<'tcx> {
infos: CanonicalVarInfos<'tcx>,
) -> CanonicalVarValues<'tcx> {
CanonicalVarValues {
- var_values: tcx.mk_substs_from_iter(infos.iter().enumerate().map(
+ var_values: tcx.mk_args_from_iter(infos.iter().enumerate().map(
|(i, info)| -> ty::GenericArg<'tcx> {
match info.kind {
CanonicalVarKind::Ty(_) | CanonicalVarKind::PlaceholderTy(_) => {
diff --git a/compiler/rustc_middle/src/infer/mod.rs b/compiler/rustc_middle/src/infer/mod.rs
index 2db59f37f..493bb8a68 100644
--- a/compiler/rustc_middle/src/infer/mod.rs
+++ b/compiler/rustc_middle/src/infer/mod.rs
@@ -15,7 +15,7 @@ use rustc_span::Span;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct MemberConstraint<'tcx> {
- /// The `DefId` and substs of the opaque type causing this constraint.
+ /// The `DefId` and args of the opaque type causing this constraint.
/// Used for error reporting.
pub key: OpaqueTypeKey<'tcx>,
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
index 1b125e8e2..d3fc1b285 100644
--- a/compiler/rustc_middle/src/lib.rs
+++ b/compiler/rustc_middle/src/lib.rs
@@ -35,7 +35,6 @@
#![feature(if_let_guard)]
#![feature(inline_const)]
#![feature(iter_from_generator)]
-#![feature(local_key_cell_methods)]
#![feature(negative_impls)]
#![feature(never_type)]
#![feature(extern_types)]
@@ -64,6 +63,7 @@
#![feature(macro_metavar_expr)]
#![recursion_limit = "512"]
#![allow(rustc::potential_query_instability)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate bitflags;
diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs
index 81c1ae4f6..f62e40669 100644
--- a/compiler/rustc_middle/src/lint.rs
+++ b/compiler/rustc_middle/src/lint.rs
@@ -169,26 +169,6 @@ impl TyCtxt<'_> {
pub fn lint_level_at_node(self, lint: &'static Lint, id: HirId) -> (Level, LintLevelSource) {
self.shallow_lint_levels_on(id.owner).lint_level_id_at_node(self, LintId::of(lint), id)
}
-
- /// Walks upwards from `id` to find a node which might change lint levels with attributes.
- /// It stops at `bound` and just returns it if reached.
- pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId {
- let hir = self.hir();
- loop {
- if id == bound {
- return bound;
- }
-
- if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
- return id;
- }
- let next = hir.parent_id(id);
- if next == id {
- bug!("lint traversal reached the root of the crate");
- }
- id = next;
- }
- }
}
/// This struct represents a lint expectation and holds all required information
@@ -238,14 +218,12 @@ pub fn explain_lint_level_source(
let hyphen_case_lint_name = name.replace('_', "-");
if lint_flag_val.as_str() == name {
err.note_once(format!(
- "requested on the command line with `{} {}`",
- flag, hyphen_case_lint_name
+ "requested on the command line with `{flag} {hyphen_case_lint_name}`"
));
} else {
let hyphen_case_flag_val = lint_flag_val.as_str().replace('_', "-");
err.note_once(format!(
- "`{} {}` implied by `{} {}`",
- flag, hyphen_case_lint_name, flag, hyphen_case_flag_val
+ "`{flag} {hyphen_case_lint_name}` implied by `{flag} {hyphen_case_flag_val}`"
));
}
}
@@ -257,8 +235,7 @@ pub fn explain_lint_level_source(
if lint_attr_name.as_str() != name {
let level_str = level.as_str();
err.note_once(format!(
- "`#[{}({})]` implied by `#[{}({})]`",
- level_str, name, level_str, lint_attr_name
+ "`#[{level_str}({name})]` implied by `#[{level_str}({lint_attr_name})]`"
));
}
}
@@ -298,6 +275,7 @@ pub fn explain_lint_level_source(
/// // ^^^^^^^^^^^^^^^^^^^^^ returns `&mut DiagnosticBuilder` by default
/// )
/// ```
+#[track_caller]
pub fn struct_lint_level(
sess: &Session,
lint: &'static Lint,
@@ -311,6 +289,7 @@ pub fn struct_lint_level(
) {
// Avoid codegen bloat from monomorphization by immediately doing dyn dispatch of `decorate` to
// the "real" work.
+ #[track_caller]
fn struct_lint_level_impl(
sess: &Session,
lint: &'static Lint,
@@ -434,12 +413,11 @@ pub fn struct_lint_level(
FutureIncompatibilityReason::EditionError(edition) => {
let current_edition = sess.edition();
format!(
- "this is accepted in the current edition (Rust {}) but is a hard error in Rust {}!",
- current_edition, edition
+ "this is accepted in the current edition (Rust {current_edition}) but is a hard error in Rust {edition}!"
)
}
FutureIncompatibilityReason::EditionSemanticsChange(edition) => {
- format!("this changes meaning in Rust {}", edition)
+ format!("this changes meaning in Rust {edition}")
}
FutureIncompatibilityReason::Custom(reason) => reason.to_owned(),
};
@@ -471,7 +449,11 @@ pub fn in_external_macro(sess: &Session, span: Span) -> bool {
match expn_data.kind {
ExpnKind::Root
| ExpnKind::Desugaring(
- DesugaringKind::ForLoop | DesugaringKind::WhileLoop | DesugaringKind::OpaqueTy,
+ DesugaringKind::ForLoop
+ | DesugaringKind::WhileLoop
+ | DesugaringKind::OpaqueTy
+ | DesugaringKind::Async
+ | DesugaringKind::Await,
) => false,
ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => true, // well, it's "external"
ExpnKind::Macro(MacroKind::Bang, _) => {
@@ -481,3 +463,12 @@ pub fn in_external_macro(sess: &Session, span: Span) -> bool {
ExpnKind::Macro { .. } => true, // definitely a plugin
}
}
+
+/// Return whether `span` is generated by `async` or `await`.
+pub fn is_from_async_await(span: Span) -> bool {
+ let expn_data = span.ctxt().outer_expn_data();
+ match expn_data.kind {
+ ExpnKind::Desugaring(DesugaringKind::Async | DesugaringKind::Await) => true,
+ _ => false,
+ }
+}
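
As an illustrative aside (not part of the patch): most of the string changes in this file, and throughout the patch, switch `format!` calls to identifiers captured directly in the format string (stable since Rust 1.58); the two spellings produce identical output:

    fn main() {
        let flag = "-W";
        let lint = "unused-variables";
        // Old style: positional arguments after the format string.
        let old = format!("requested on the command line with `{} {}`", flag, lint);
        // New style: identifiers captured directly inside the braces.
        let new = format!("requested on the command line with `{flag} {lint}`");
        assert_eq!(old, new);
    }
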
diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs
index cd1c6c330..fca16d8e5 100644
--- a/compiler/rustc_middle/src/macros.rs
+++ b/compiler/rustc_middle/src/macros.rs
@@ -43,7 +43,7 @@ macro_rules! span_bug {
#[macro_export]
macro_rules! CloneLiftImpls {
- ($($ty:ty,)+) => {
+ ($($ty:ty),+ $(,)?) => {
$(
impl<'tcx> $crate::ty::Lift<'tcx> for $ty {
type Lifted = Self;
@@ -59,7 +59,7 @@ macro_rules! CloneLiftImpls {
/// allocated data** (i.e., don't need to be folded).
#[macro_export]
macro_rules! TrivialTypeTraversalImpls {
- ($($ty:ty,)+) => {
+ ($($ty:ty),+ $(,)?) => {
$(
impl<'tcx> $crate::ty::fold::TypeFoldable<$crate::ty::TyCtxt<'tcx>> for $ty {
fn try_fold_with<F: $crate::ty::fold::FallibleTypeFolder<$crate::ty::TyCtxt<'tcx>>>(
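
Purely for illustration (not part of the patch): the matcher change from `($($ty:ty,)+)` to `($($ty:ty),+ $(,)?)` stops requiring a trailing comma; the types are now comma-separated, with the trailing comma optional. A toy macro (its name and body are made up for this example) showing what the new matcher accepts:

    // Hypothetical stand-in for CloneLiftImpls / TrivialTypeTraversalImpls;
    // it just counts the matched types.
    macro_rules! count_types {
        ($($ty:ty),+ $(,)?) => {
            [$(stringify!($ty)),+].len()
        };
    }

    fn main() {
        // No trailing comma: rejected by the old matcher, accepted now.
        assert_eq!(count_types!(u32), 1);
        assert_eq!(count_types!(u32, bool), 2);
        // A trailing comma still works.
        assert_eq!(count_types!(u32, bool, String,), 3);
    }
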
diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
index c4601a1fb..02fd6ed7b 100644
--- a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
+++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
@@ -100,6 +100,8 @@ bitflags! {
const REALLOCATOR = 1 << 18;
/// `#[rustc_allocator_zeroed]`: a hint to LLVM that the function only allocates zeroed memory.
const ALLOCATOR_ZEROED = 1 << 19;
+ /// `#[no_builtins]`: indicates that implicit builtin knowledge of functions should be disabled for this function.
+ const NO_BUILTINS = 1 << 20;
}
}
diff --git a/compiler/rustc_middle/src/middle/exported_symbols.rs b/compiler/rustc_middle/src/middle/exported_symbols.rs
index 9041da9a0..e30b6b203 100644
--- a/compiler/rustc_middle/src/middle/exported_symbols.rs
+++ b/compiler/rustc_middle/src/middle/exported_symbols.rs
@@ -1,4 +1,4 @@
-use crate::ty::subst::SubstsRef;
+use crate::ty::GenericArgsRef;
use crate::ty::{self, Ty, TyCtxt};
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_macros::HashStable;
@@ -41,7 +41,7 @@ pub struct SymbolExportInfo {
#[derive(Eq, PartialEq, Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
pub enum ExportedSymbol<'tcx> {
NonGeneric(DefId),
- Generic(DefId, SubstsRef<'tcx>),
+ Generic(DefId, GenericArgsRef<'tcx>),
DropGlue(Ty<'tcx>),
ThreadLocalShim(DefId),
NoDefId(ty::SymbolName<'tcx>),
@@ -53,15 +53,15 @@ impl<'tcx> ExportedSymbol<'tcx> {
pub fn symbol_name_for_local_instance(&self, tcx: TyCtxt<'tcx>) -> ty::SymbolName<'tcx> {
match *self {
ExportedSymbol::NonGeneric(def_id) => tcx.symbol_name(ty::Instance::mono(tcx, def_id)),
- ExportedSymbol::Generic(def_id, substs) => {
- tcx.symbol_name(ty::Instance::new(def_id, substs))
+ ExportedSymbol::Generic(def_id, args) => {
+ tcx.symbol_name(ty::Instance::new(def_id, args))
}
ExportedSymbol::DropGlue(ty) => {
tcx.symbol_name(ty::Instance::resolve_drop_in_place(tcx, ty))
}
ExportedSymbol::ThreadLocalShim(def_id) => tcx.symbol_name(ty::Instance {
def: ty::InstanceDef::ThreadLocalShim(def_id),
- substs: ty::InternalSubsts::empty(),
+ args: ty::GenericArgs::empty(),
}),
ExportedSymbol::NoDefId(symbol_name) => symbol_name,
}
@@ -72,6 +72,6 @@ pub fn metadata_symbol_name(tcx: TyCtxt<'_>) -> String {
format!(
"rust_metadata_{}_{:08x}",
tcx.crate_name(LOCAL_CRATE),
- tcx.sess.local_stable_crate_id(),
+ tcx.stable_crate_id(LOCAL_CRATE),
)
}
diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs
index 5baeb1ee0..1913421f5 100644
--- a/compiler/rustc_middle/src/middle/privacy.rs
+++ b/compiler/rustc_middle/src/middle/privacy.rs
@@ -178,7 +178,12 @@ impl EffectiveVisibilities {
// All effective visibilities except `reachable_through_impl_trait` are limited to
// nominal visibility. For some items nominal visibility doesn't make sense so we
// don't check this condition for them.
- if !matches!(tcx.def_kind(def_id), DefKind::Impl { .. }) {
+ let is_impl = matches!(tcx.def_kind(def_id), DefKind::Impl { .. });
+ let is_associated_item_in_trait_impl = tcx
+ .impl_of_method(def_id.to_def_id())
+ .and_then(|impl_id| tcx.trait_id_of_impl(impl_id))
+ .is_some();
+ if !is_impl && !is_associated_item_in_trait_impl {
let nominal_vis = tcx.visibility(def_id);
if !nominal_vis.is_at_least(ev.reachable, tcx) {
span_bug!(
@@ -186,7 +191,7 @@ impl EffectiveVisibilities {
"{:?}: reachable {:?} > nominal {:?}",
def_id,
ev.reachable,
- nominal_vis
+ nominal_vis,
);
}
}
diff --git a/compiler/rustc_middle/src/middle/region.rs b/compiler/rustc_middle/src/middle/region.rs
index 10712e146..c50c5e6f7 100644
--- a/compiler/rustc_middle/src/middle/region.rs
+++ b/compiler/rustc_middle/src/middle/region.rs
@@ -10,7 +10,7 @@ use crate::ty::TyCtxt;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
-use rustc_hir::Node;
+use rustc_hir::{HirIdMap, Node};
use rustc_macros::HashStable;
use rustc_query_system::ich::StableHashingContext;
use rustc_span::{Span, DUMMY_SP};
@@ -228,7 +228,7 @@ pub struct ScopeTree {
/// and not the enclosing *statement*. Expressions that are not present in this
/// table are not rvalue candidates. The set of rvalue candidates is computed
/// during type check based on a traversal of the AST.
- pub rvalue_candidates: FxHashMap<hir::HirId, RvalueCandidateType>,
+ pub rvalue_candidates: HirIdMap<RvalueCandidateType>,
/// If there are any `yield` nested within a scope, this map
/// stores the `Span` of the last one and its index in the
diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs
index 60844c17e..908ab8b61 100644
--- a/compiler/rustc_middle/src/middle/stability.rs
+++ b/compiler/rustc_middle/src/middle/stability.rs
@@ -107,7 +107,7 @@ pub fn report_unstable(
soft_handler: impl FnOnce(&'static Lint, Span, String),
) {
let msg = match reason {
- Some(r) => format!("use of unstable library feature '{}': {}", feature, r),
+ Some(r) => format!("use of unstable library feature '{feature}': {r}"),
None => format!("use of unstable library feature '{}'", &feature),
};
@@ -170,7 +170,7 @@ pub fn deprecation_suggestion(
if let Some(suggestion) = suggestion {
diag.span_suggestion_verbose(
span,
- format!("replace the use of the deprecated {}", kind),
+ format!("replace the use of the deprecated {kind}"),
suggestion,
Applicability::MachineApplicable,
);
@@ -189,12 +189,12 @@ fn deprecation_message(
path: &str,
) -> String {
let message = if is_in_effect {
- format!("use of deprecated {} `{}`", kind, path)
+ format!("use of deprecated {kind} `{path}`")
} else {
let since = since.as_ref().map(Symbol::as_str);
if since == Some("TBD") {
- format!("use of {} `{}` that will be deprecated in a future Rust version", kind, path)
+ format!("use of {kind} `{path}` that will be deprecated in a future Rust version")
} else {
format!(
"use of {} `{}` that will be deprecated in future version {}",
@@ -206,7 +206,7 @@ fn deprecation_message(
};
match note {
- Some(reason) => format!("{}: {}", message, reason),
+ Some(reason) => format!("{message}: {reason}"),
None => message,
}
}
@@ -312,7 +312,7 @@ fn suggestion_for_allocator_api(
return Some((
inner_types,
"consider wrapping the inner types in tuple".to_string(),
- format!("({})", snippet),
+ format!("({snippet})"),
Applicability::MaybeIncorrect,
));
}
@@ -599,7 +599,7 @@ impl<'tcx> TyCtxt<'tcx> {
|span, def_id| {
// The API could be uncallable for other reasons, for example when a private module
// was referenced.
- self.sess.delay_span_bug(span, format!("encountered unmarked API: {:?}", def_id));
+ self.sess.delay_span_bug(span, format!("encountered unmarked API: {def_id:?}"));
},
)
}
diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs
index 7722e7b47..0ad17e819 100644
--- a/compiler/rustc_middle/src/mir/basic_blocks.rs
+++ b/compiler/rustc_middle/src/mir/basic_blocks.rs
@@ -178,9 +178,7 @@ impl<'tcx> graph::WithPredecessors for BasicBlocks<'tcx> {
}
}
-TrivialTypeTraversalAndLiftImpls! {
- Cache,
-}
+TrivialTypeTraversalAndLiftImpls! { Cache }
impl<S: Encoder> Encodable<S> for Cache {
#[inline]
diff --git a/compiler/rustc_middle/src/mir/coverage.rs b/compiler/rustc_middle/src/mir/coverage.rs
index db24dae11..1efb54bdb 100644
--- a/compiler/rustc_middle/src/mir/coverage.rs
+++ b/compiler/rustc_middle/src/mir/coverage.rs
@@ -6,69 +6,43 @@ use rustc_span::Symbol;
use std::fmt::{self, Debug, Formatter};
rustc_index::newtype_index! {
- /// An ExpressionOperandId value is assigned directly from either a
- /// CounterValueReference.as_u32() (which ascend from 1) or an ExpressionOperandId.as_u32()
- /// (which _*descend*_ from u32::MAX). Id value `0` (zero) represents a virtual counter with a
- /// constant value of `0`.
- #[derive(HashStable)]
- #[max = 0xFFFF_FFFF]
- #[debug_format = "ExpressionOperandId({})"]
- pub struct ExpressionOperandId {
- }
-}
-
-impl ExpressionOperandId {
- /// An expression operand for a "zero counter", as described in the following references:
- ///
- /// * <https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#counter>
- /// * <https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#tag>
- /// * <https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#counter-expressions>
+ /// ID of a coverage counter. Values ascend from 0.
///
- /// This operand can be used to count two or more separate code regions with a single counter,
- /// if they run sequentially with no branches, by injecting the `Counter` in a `BasicBlock` for
- /// one of the code regions, and inserting `CounterExpression`s ("add ZERO to the counter") in
- /// the coverage map for the other code regions.
- pub const ZERO: Self = Self::from_u32(0);
-}
-
-rustc_index::newtype_index! {
+ /// Note that LLVM handles counter IDs as `uint32_t`, so there is no need
+ /// to use a larger representation on the Rust side.
#[derive(HashStable)]
#[max = 0xFFFF_FFFF]
- #[debug_format = "CounterValueReference({})"]
- pub struct CounterValueReference {}
+ #[debug_format = "CounterId({})"]
+ pub struct CounterId {}
}
-impl CounterValueReference {
- /// Counters start at 1 to reserve 0 for ExpressionOperandId::ZERO.
- pub const START: Self = Self::from_u32(1);
+impl CounterId {
+ pub const START: Self = Self::from_u32(0);
- /// Returns explicitly-requested zero-based version of the counter id, used
- /// during codegen. LLVM expects zero-based indexes.
- pub fn zero_based_index(self) -> u32 {
- let one_based_index = self.as_u32();
- debug_assert!(one_based_index > 0);
- one_based_index - 1
+ #[inline(always)]
+ pub fn next_id(self) -> Self {
+ Self::from_u32(self.as_u32() + 1)
}
}
rustc_index::newtype_index! {
- /// InjectedExpressionId.as_u32() converts to ExpressionOperandId.as_u32()
+ /// ID of a coverage-counter expression. Values ascend from 0.
///
- /// Values descend from u32::MAX.
+ /// Note that LLVM handles expression IDs as `uint32_t`, so there is no need
+ /// to use a larger representation on the Rust side.
#[derive(HashStable)]
#[max = 0xFFFF_FFFF]
- #[debug_format = "InjectedExpressionId({})"]
- pub struct InjectedExpressionId {}
+ #[debug_format = "ExpressionId({})"]
+ pub struct ExpressionId {}
}
-rustc_index::newtype_index! {
- /// InjectedExpressionIndex.as_u32() translates to u32::MAX - ExpressionOperandId.as_u32()
- ///
- /// Values ascend from 0.
- #[derive(HashStable)]
- #[max = 0xFFFF_FFFF]
- #[debug_format = "InjectedExpressionIndex({})"]
- pub struct InjectedExpressionIndex {}
+impl ExpressionId {
+ pub const START: Self = Self::from_u32(0);
+
+ #[inline(always)]
+ pub fn next_id(self) -> Self {
+ Self::from_u32(self.as_u32() + 1)
+ }
}
rustc_index::newtype_index! {
@@ -81,17 +55,25 @@ rustc_index::newtype_index! {
pub struct MappedExpressionIndex {}
}
-impl From<CounterValueReference> for ExpressionOperandId {
- #[inline]
- fn from(v: CounterValueReference) -> ExpressionOperandId {
- ExpressionOperandId::from(v.as_u32())
- }
+/// Operand of a coverage-counter expression.
+///
+/// Operands can be a constant zero value, an actual coverage counter, or another
+/// expression. Counter/expression operands are referred to by ID.
+#[derive(Copy, Clone, PartialEq, Eq)]
+#[derive(TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)]
+pub enum Operand {
+ Zero,
+ Counter(CounterId),
+ Expression(ExpressionId),
}
-impl From<InjectedExpressionId> for ExpressionOperandId {
- #[inline]
- fn from(v: InjectedExpressionId) -> ExpressionOperandId {
- ExpressionOperandId::from(v.as_u32())
+impl Debug for Operand {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Zero => write!(f, "Zero"),
+ Self::Counter(id) => f.debug_tuple("Counter").field(&id.as_u32()).finish(),
+ Self::Expression(id) => f.debug_tuple("Expression").field(&id.as_u32()).finish(),
+ }
}
}
@@ -99,32 +81,21 @@ impl From<InjectedExpressionId> for ExpressionOperandId {
pub enum CoverageKind {
Counter {
function_source_hash: u64,
- id: CounterValueReference,
+ /// ID of this counter within its enclosing function.
+ /// Expressions in the same function can refer to it as an operand.
+ id: CounterId,
},
Expression {
- id: InjectedExpressionId,
- lhs: ExpressionOperandId,
+ /// ID of this coverage-counter expression within its enclosing function.
+ /// Other expressions in the same function can refer to it as an operand.
+ id: ExpressionId,
+ lhs: Operand,
op: Op,
- rhs: ExpressionOperandId,
+ rhs: Operand,
},
Unreachable,
}
-impl CoverageKind {
- pub fn as_operand_id(&self) -> ExpressionOperandId {
- use CoverageKind::*;
- match *self {
- Counter { id, .. } => ExpressionOperandId::from(id),
- Expression { id, .. } => ExpressionOperandId::from(id),
- Unreachable => bug!("Unreachable coverage cannot be part of an expression"),
- }
- }
-
- pub fn is_expression(&self) -> bool {
- matches!(self, Self::Expression { .. })
- }
-}
-
impl Debug for CoverageKind {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
use CoverageKind::*;
@@ -132,14 +103,14 @@ impl Debug for CoverageKind {
Counter { id, .. } => write!(fmt, "Counter({:?})", id.index()),
Expression { id, lhs, op, rhs } => write!(
fmt,
- "Expression({:?}) = {} {} {}",
+ "Expression({:?}) = {:?} {} {:?}",
id.index(),
- lhs.index(),
+ lhs,
match op {
Op::Add => "+",
Op::Subtract => "-",
},
- rhs.index(),
+ rhs,
),
Unreachable => write!(fmt, "Unreachable"),
}
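
As an illustrative aside (not part of the patch): under the old scheme, counter IDs ascended from 1 and expression IDs descended from u32::MAX so that both could share the single `ExpressionOperandId` space; the new scheme gives counters and expressions separate zero-based ID spaces and makes the operand kind explicit in the `Operand` enum. A standalone sketch with simplified stand-in types (not the rustc definitions):

    #[derive(Copy, Clone, Debug, PartialEq)]
    struct CounterId(u32);
    #[derive(Copy, Clone, Debug, PartialEq)]
    struct ExpressionId(u32);

    #[derive(Copy, Clone, Debug, PartialEq)]
    enum Operand {
        Zero,
        Counter(CounterId),
        Expression(ExpressionId),
    }

    #[derive(Debug)]
    enum Op { Add, Subtract }

    #[derive(Debug)]
    struct Expression {
        id: ExpressionId,
        lhs: Operand,
        op: Op,
        rhs: Operand,
    }

    fn main() {
        let c0 = CounterId(0); // CounterId::START is now 0, not 1
        let c1 = CounterId(1);
        // e0 = c0 + c1
        let e0 = Expression {
            id: ExpressionId(0),
            lhs: Operand::Counter(c0),
            op: Op::Add,
            rhs: Operand::Counter(c1),
        };
        // e1 = e0 - 0 (a zero operand no longer needs a reserved counter ID)
        let e1 = Expression {
            id: ExpressionId(1),
            lhs: Operand::Expression(e0.id),
            op: Op::Subtract,
            rhs: Operand::Zero,
        };
        println!("{e0:?}\n{e1:?}");
    }
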
diff --git a/compiler/rustc_middle/src/mir/generic_graph.rs b/compiler/rustc_middle/src/mir/generic_graph.rs
index d1f3561c0..d1753427e 100644
--- a/compiler/rustc_middle/src/mir/generic_graph.rs
+++ b/compiler/rustc_middle/src/mir/generic_graph.rs
@@ -7,7 +7,7 @@ use rustc_middle::ty::TyCtxt;
pub fn mir_fn_to_generic_graph<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'_>) -> Graph {
let def_id = body.source.def_id();
let def_name = graphviz_safe_def_name(def_id);
- let graph_name = format!("Mir_{}", def_name);
+ let graph_name = format!("Mir_{def_name}");
let dark_mode = tcx.sess.opts.unstable_opts.graphviz_dark_mode;
// Nodes
@@ -48,7 +48,7 @@ fn bb_to_graph_node(block: BasicBlock, body: &Body<'_>, dark_mode: bool) -> Node
};
let style = NodeStyle { title_bg: Some(bgcolor.to_owned()), ..Default::default() };
- let mut stmts: Vec<String> = data.statements.iter().map(|x| format!("{:?}", x)).collect();
+ let mut stmts: Vec<String> = data.statements.iter().map(|x| format!("{x:?}")).collect();
// add the terminator to the stmts, gsgdt can print it out separately
let mut terminator_head = String::new();
diff --git a/compiler/rustc_middle/src/mir/generic_graphviz.rs b/compiler/rustc_middle/src/mir/generic_graphviz.rs
index ccae7e159..299b50525 100644
--- a/compiler/rustc_middle/src/mir/generic_graphviz.rs
+++ b/compiler/rustc_middle/src/mir/generic_graphviz.rs
@@ -70,8 +70,8 @@ impl<
writeln!(w, r#" graph [{}];"#, graph_attrs.join(" "))?;
let content_attrs_str = content_attrs.join(" ");
- writeln!(w, r#" node [{}];"#, content_attrs_str)?;
- writeln!(w, r#" edge [{}];"#, content_attrs_str)?;
+ writeln!(w, r#" node [{content_attrs_str}];"#)?;
+ writeln!(w, r#" edge [{content_attrs_str}];"#)?;
// Graph label
if let Some(graph_label) = &self.graph_label {
@@ -112,7 +112,7 @@ impl<
// (format!("{:?}", node), color)
// };
let color = if dark_mode { "dimgray" } else { "gray" };
- let (blk, bgcolor) = (format!("{:?}", node), color);
+ let (blk, bgcolor) = (format!("{node:?}"), color);
write!(
w,
r#"<tr><td bgcolor="{bgcolor}" {attrs} colspan="{colspan}">{blk}</td></tr>"#,
@@ -151,7 +151,7 @@ impl<
} else {
"".to_owned()
};
- writeln!(w, r#" {} -> {} [label=<{}>];"#, src, trg, escaped_edge_label)?;
+ writeln!(w, r#" {src} -> {trg} [label=<{escaped_edge_label}>];"#)?;
}
Ok(())
}
@@ -163,7 +163,7 @@ impl<
W: Write,
{
let escaped_label = dot::escape_html(label);
- writeln!(w, r#" label=<<br/><br/>{}<br align="left"/><br/><br/><br/>>;"#, escaped_label)
+ writeln!(w, r#" label=<<br/><br/>{escaped_label}<br align="left"/><br/><br/><br/>>;"#)
}
fn node(&self, node: G::Node) -> String {
diff --git a/compiler/rustc_middle/src/mir/graphviz.rs b/compiler/rustc_middle/src/mir/graphviz.rs
index 2de73db3a..5c7de8644 100644
--- a/compiler/rustc_middle/src/mir/graphviz.rs
+++ b/compiler/rustc_middle/src/mir/graphviz.rs
@@ -127,5 +127,5 @@ fn write_graph_label<'tcx, W: std::fmt::Write>(
}
fn escape<T: Debug>(t: &T) -> String {
- dot::escape_html(&format!("{:?}", t))
+ dot::escape_html(&format!("{t:?}"))
}
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index b8030d9db..c787481bf 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -18,9 +18,9 @@ use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};
use super::{
- read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
- ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess,
- UnsupportedOpInfo,
+ read_target_uint, write_target_uint, AllocId, BadBytesAccess, InterpError, InterpResult,
+ Pointer, PointerArithmetic, Provenance, ResourceExhaustionInfo, Scalar, ScalarSizeMismatch,
+ UndefinedBehaviorInfo, UnsupportedOpInfo,
};
use crate::ty;
use init_mask::*;
@@ -173,13 +173,13 @@ pub enum AllocError {
/// A scalar had the wrong size.
ScalarSizeMismatch(ScalarSizeMismatch),
/// Encountered a pointer where we needed raw bytes.
- ReadPointerAsBytes,
+ ReadPointerAsInt(Option<BadBytesAccess>),
/// Partially overwriting a pointer.
- PartialPointerOverwrite(Size),
+ OverwritePartialPointer(Size),
/// Partially copying a pointer.
- PartialPointerCopy(Size),
+ ReadPartialPointer(Size),
/// Using uninitialized data where it is not allowed.
- InvalidUninitBytes(Option<UninitBytesAccess>),
+ InvalidUninitBytes(Option<BadBytesAccess>),
}
pub type AllocResult<T = ()> = Result<T, AllocError>;
@@ -196,12 +196,14 @@ impl AllocError {
ScalarSizeMismatch(s) => {
InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
}
- ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
- PartialPointerOverwrite(offset) => InterpError::Unsupported(
- UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
+ ReadPointerAsInt(info) => InterpError::Unsupported(
+ UnsupportedOpInfo::ReadPointerAsInt(info.map(|b| (alloc_id, b))),
),
- PartialPointerCopy(offset) => InterpError::Unsupported(
- UnsupportedOpInfo::PartialPointerCopy(Pointer::new(alloc_id, offset)),
+ OverwritePartialPointer(offset) => InterpError::Unsupported(
+ UnsupportedOpInfo::OverwritePartialPointer(Pointer::new(alloc_id, offset)),
+ ),
+ ReadPartialPointer(offset) => InterpError::Unsupported(
+ UnsupportedOpInfo::ReadPartialPointer(Pointer::new(alloc_id, offset)),
),
InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
@@ -327,6 +329,9 @@ impl<Prov: Provenance, Bytes: AllocBytes> Allocation<Prov, (), Bytes> {
/// Try to create an Allocation of `size` bytes, panics if there is not enough memory
/// available to the compiler to do so.
+ ///
+ /// Example use case: To obtain an Allocation filled with specific data,
+ /// first call this function and then call write_scalar to fill in the right data.
pub fn uninit(size: Size, align: Align) -> Self {
match Self::uninit_inner(size, align, || {
panic!("Allocation::uninit called with panic_on_fail had allocation failure");
@@ -433,14 +438,26 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
range: AllocRange,
) -> AllocResult<&[u8]> {
self.init_mask.is_range_initialized(range).map_err(|uninit_range| {
- AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
+ AllocError::InvalidUninitBytes(Some(BadBytesAccess {
access: range,
- uninit: uninit_range,
+ bad: uninit_range,
}))
})?;
if !Prov::OFFSET_IS_ADDR {
if !self.provenance.range_empty(range, cx) {
- return Err(AllocError::ReadPointerAsBytes);
+ // Find the provenance.
+ let (offset, _prov) = self
+ .provenance
+ .range_get_ptrs(range, cx)
+ .first()
+ .copied()
+ .expect("there must be provenance somewhere here");
+ let start = offset.max(range.start); // the pointer might begin before `range`!
+ let end = (offset + cx.pointer_size()).min(range.end()); // the pointer might end after `range`!
+ return Err(AllocError::ReadPointerAsInt(Some(BadBytesAccess {
+ access: range,
+ bad: AllocRange::from(start..end),
+ })));
}
}
Ok(self.get_bytes_unchecked(range))
@@ -536,23 +553,25 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
// Now use this provenance.
let ptr = Pointer::new(prov, Size::from_bytes(bits));
return Ok(Scalar::from_maybe_pointer(ptr, cx));
+ } else {
+ // Without OFFSET_IS_ADDR, the only remaining case we can handle is total absence of
+ // provenance.
+ if self.provenance.range_empty(range, cx) {
+ return Ok(Scalar::from_uint(bits, range.size));
+ }
+ // Else we have mixed provenance, that doesn't work.
+ return Err(AllocError::ReadPartialPointer(range.start));
}
} else {
// We are *not* reading a pointer.
- // If we can just ignore provenance, do exactly that.
- if Prov::OFFSET_IS_ADDR {
+ // If we can just ignore provenance or there is none, that's easy.
+ if Prov::OFFSET_IS_ADDR || self.provenance.range_empty(range, cx) {
// We just strip provenance.
return Ok(Scalar::from_uint(bits, range.size));
}
+ // There is some provenance and we don't have OFFSET_IS_ADDR. This doesn't work.
+ return Err(AllocError::ReadPointerAsInt(None));
}
-
- // Fallback path for when we cannot treat provenance bytewise or ignore it.
- assert!(!Prov::OFFSET_IS_ADDR);
- if !self.provenance.range_empty(range, cx) {
- return Err(AllocError::ReadPointerAsBytes);
- }
- // There is no provenance, we can just return the bits.
- Ok(Scalar::from_uint(bits, range.size))
}
/// Writes a *non-ZST* scalar.
@@ -571,7 +590,7 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
assert!(self.mutability == Mutability::Mut);
// `to_bits_or_ptr_internal` is the right method because we just want to store this data
- // as-is into memory.
+ // as-is into memory. This also double-checks that `val.size()` matches `range.size`.
let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
Right(ptr) => {
let (provenance, offset) = ptr.into_parts();
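
Purely for illustration (not part of the patch): the rewritten scalar-read path is essentially a small decision tree over three questions — are we reading a pointer-sized value, does a single pointer's provenance cover the whole range, and is there any provenance in the range at all. A toy model of that logic with plain booleans instead of rustc's types (the byte-level provenance path taken when `OFFSET_IS_ADDR` holds while reading a pointer is omitted):

    #[derive(Debug, PartialEq)]
    enum ReadResult {
        Int(u128),
        Ptr(u128),
        ErrReadPartialPointer,
        ErrReadPointerAsInt,
    }

    /// Toy model of `Allocation::read_scalar` after this change.
    fn read_scalar(
        bits: u128,
        reading_ptr_sized_value: bool,
        whole_pointer_provenance: bool, // one pointer's provenance covers the range
        any_provenance: bool,           // any provenance at all in the range
        offset_is_addr: bool,           // Prov::OFFSET_IS_ADDR
    ) -> ReadResult {
        if reading_ptr_sized_value {
            if whole_pointer_provenance {
                ReadResult::Ptr(bits)
            } else if !any_provenance {
                ReadResult::Int(bits)
            } else {
                // Mixed or partial provenance cannot be read back as a value.
                ReadResult::ErrReadPartialPointer
            }
        } else {
            // Not reading a pointer: provenance can be ignored when it encodes
            // the address (OFFSET_IS_ADDR) or when there is none; otherwise error.
            if offset_is_addr || !any_provenance {
                ReadResult::Int(bits)
            } else {
                ReadResult::ErrReadPointerAsInt
            }
        }
    }

    fn main() {
        assert_eq!(read_scalar(42, false, false, false, false), ReadResult::Int(42));
        assert_eq!(read_scalar(42, false, false, true, false), ReadResult::ErrReadPointerAsInt);
        assert_eq!(read_scalar(42, true, false, true, false), ReadResult::ErrReadPartialPointer);
    }
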
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
index d4dd56a42..2c6bb908f 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
@@ -542,11 +542,7 @@ impl InitMaskMaterialized {
debug_assert_eq!(
result,
find_bit_slow(self, start, end, is_init),
- "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
- start,
- end,
- is_init,
- self
+ "optimized implementation of find_bit is wrong for start={start:?} end={end:?} is_init={is_init} init_mask={self:#?}"
);
result
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
index 318f93e12..0243fc451 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
@@ -66,7 +66,11 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
/// Returns all ptr-sized provenance in the given range.
/// If the range has length 0, returns provenance that crosses the edge between `start-1` and
/// `start`.
- fn range_get_ptrs(&self, range: AllocRange, cx: &impl HasDataLayout) -> &[(Size, Prov)] {
+ pub(super) fn range_get_ptrs(
+ &self,
+ range: AllocRange,
+ cx: &impl HasDataLayout,
+ ) -> &[(Size, Prov)] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let adjusted_start = Size::from_bytes(
@@ -158,7 +162,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
if first < start {
if !Prov::OFFSET_IS_ADDR {
// We can't split up the provenance into less than a pointer.
- return Err(AllocError::PartialPointerOverwrite(first));
+ return Err(AllocError::OverwritePartialPointer(first));
}
// Insert the remaining part in the bytewise provenance.
let prov = self.ptrs[&first];
@@ -171,7 +175,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
let begin_of_last = last - cx.data_layout().pointer_size;
if !Prov::OFFSET_IS_ADDR {
// We can't split up the provenance into less than a pointer.
- return Err(AllocError::PartialPointerOverwrite(begin_of_last));
+ return Err(AllocError::OverwritePartialPointer(begin_of_last));
}
// Insert the remaining part in the bytewise provenance.
let prov = self.ptrs[&begin_of_last];
@@ -246,10 +250,10 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
if !Prov::OFFSET_IS_ADDR {
// There can't be any bytewise provenance, and we cannot split up the begin/end overlap.
if let Some(entry) = begin_overlap {
- return Err(AllocError::PartialPointerCopy(entry.0));
+ return Err(AllocError::ReadPartialPointer(entry.0));
}
if let Some(entry) = end_overlap {
- return Err(AllocError::PartialPointerCopy(entry.0));
+ return Err(AllocError::ReadPartialPointer(entry.0));
}
debug_assert!(self.bytes.is_none());
} else {
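
As an illustrative aside (not part of the patch): `range_get_ptrs` starts its lookup `pointer_size - 1` bytes before the queried range because a pointer beginning that far back still overlaps the range's first byte. A quick worked version of that adjustment:

    fn main() {
        let pointer_size: u64 = 8;
        let range_start: u64 = 10;
        // A pointer stored at offset 3 covers bytes 3..11, so it still
        // overlaps a range that starts at offset 10.
        let adjusted_start = range_start.saturating_sub(pointer_size - 1);
        assert_eq!(adjusted_start, 3);
    }
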
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index 2435bc59e..e6ef5a41e 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -12,7 +12,8 @@ use rustc_errors::{
use rustc_macros::HashStable;
use rustc_session::CtfeBacktrace;
use rustc_span::def_id::DefId;
-use rustc_target::abi::{call, Align, Size, WrappingRange};
+use rustc_target::abi::{call, Align, Size, VariantIdx, WrappingRange};
+
use std::borrow::Cow;
use std::{any::Any, backtrace::Backtrace, fmt};
@@ -22,7 +23,7 @@ pub enum ErrorHandled {
/// *guaranteed* to fail. Warnings/lints *must not* produce `Reported`.
Reported(ReportedErrorInfo),
/// Don't emit an error, the evaluation failed because the MIR was generic
- /// and the substs didn't fully monomorphize it.
+ /// and the args didn't fully monomorphize it.
TooGeneric,
}
@@ -66,9 +67,7 @@ impl Into<ErrorGuaranteed> for ReportedErrorInfo {
}
}
-TrivialTypeTraversalAndLiftImpls! {
- ErrorHandled,
-}
+TrivialTypeTraversalAndLiftImpls! { ErrorHandled }
pub type EvalToAllocationRawResult<'tcx> = Result<ConstAlloc<'tcx>, ErrorHandled>;
pub type EvalToConstValueResult<'tcx> = Result<ConstValue<'tcx>, ErrorHandled>;
@@ -135,10 +134,6 @@ impl InterpErrorBacktrace {
}
impl<'tcx> InterpErrorInfo<'tcx> {
- pub fn from_parts(kind: InterpError<'tcx>, backtrace: InterpErrorBacktrace) -> Self {
- Self(Box::new(InterpErrorInfoInner { kind, backtrace }))
- }
-
pub fn into_parts(self) -> (InterpError<'tcx>, InterpErrorBacktrace) {
let InterpErrorInfo(box InterpErrorInfoInner { kind, backtrace }) = self;
(kind, backtrace)
@@ -156,7 +151,7 @@ impl<'tcx> InterpErrorInfo<'tcx> {
}
fn print_backtrace(backtrace: &Backtrace) {
- eprintln!("\n\nAn error occurred in miri:\n{}", backtrace);
+ eprintln!("\n\nAn error occurred in the MIR interpreter:\n{backtrace}");
}
impl From<ErrorGuaranteed> for InterpErrorInfo<'_> {
@@ -189,11 +184,8 @@ pub enum InvalidProgramInfo<'tcx> {
/// (which unfortunately typeck does not reject).
/// Not using `FnAbiError` as that contains a nested `LayoutError`.
FnAbiAdjustForForeignAbi(call::AdjustForForeignAbiError),
- /// SizeOf of unsized type was requested.
- SizeOfUnsizedType(Ty<'tcx>),
- /// An unsized local was accessed without having been initialized.
- /// This is not meaningful as we can't even have backing memory for such locals.
- UninitUnsizedLocal,
+ /// We are running into a nonsense situation due to ConstProp violating our invariants.
+ ConstPropNonsense,
}
/// Details of why a pointer had to be in-bounds.
@@ -228,13 +220,13 @@ impl IntoDiagnosticArg for InvalidMetaKind {
}
}
-/// Details of an access to uninitialized bytes where it is not allowed.
+/// Details of an access to uninitialized bytes / bad pointer bytes where it is not allowed.
#[derive(Debug, Clone, Copy)]
-pub struct UninitBytesAccess {
+pub struct BadBytesAccess {
/// Range of the original memory access.
pub access: AllocRange,
- /// Range of the uninit memory that was encountered. (Might not be maximal.)
- pub uninit: AllocRange,
+ /// Range of the bad memory that was encountered. (Might not be maximal.)
+ pub bad: AllocRange,
}
/// Information about a size mismatch.
@@ -284,8 +276,8 @@ pub enum UndefinedBehaviorInfo<'a> {
InvalidMeta(InvalidMetaKind),
/// Reading a C string that does not end within its allocation.
UnterminatedCString(Pointer),
- /// Dereferencing a dangling pointer after it got freed.
- PointerUseAfterFree(AllocId),
+ /// Using a pointer after it got freed.
+ PointerUseAfterFree(AllocId, CheckInAllocMsg),
/// Used a pointer outside the bounds it is valid for.
/// (If `ptr_size > 0`, determines the size of the memory range that was expected to be in-bounds.)
PointerOutOfBounds {
@@ -318,15 +310,17 @@ pub enum UndefinedBehaviorInfo<'a> {
/// Using a string that is not valid UTF-8,
InvalidStr(std::str::Utf8Error),
/// Using uninitialized data where it is not allowed.
- InvalidUninitBytes(Option<(AllocId, UninitBytesAccess)>),
+ InvalidUninitBytes(Option<(AllocId, BadBytesAccess)>),
/// Working with a local that is not currently live.
DeadLocal,
/// Data size is not equal to target size.
ScalarSizeMismatch(ScalarSizeMismatch),
/// A discriminant of an uninhabited enum variant is written.
- UninhabitedEnumVariantWritten,
+ UninhabitedEnumVariantWritten(VariantIdx),
+ /// An uninhabited enum variant is projected.
+ UninhabitedEnumVariantRead(VariantIdx),
/// Validation error.
- Validation(ValidationErrorInfo<'a>),
+ ValidationError(ValidationErrorInfo<'a>),
// FIXME(fee1-dead) these should all be actual variants of the enum instead of dynamically
// dispatched
/// A custom (free-form) error, created by `err_ub_custom!`.
@@ -368,6 +362,8 @@ pub enum ExpectedKind {
Float,
Int,
FnPtr,
+ EnumTag,
+ Str,
}
impl From<PointerKind> for ExpectedKind {
@@ -381,10 +377,11 @@ impl From<PointerKind> for ExpectedKind {
#[derive(Debug)]
pub enum ValidationErrorKind<'tcx> {
+ PointerAsInt { expected: ExpectedKind },
+ PartialPointer,
PtrToUninhabited { ptr_kind: PointerKind, ty: Ty<'tcx> },
PtrToStatic { ptr_kind: PointerKind },
PtrToMut { ptr_kind: PointerKind },
- ExpectedNonPtr { value: String },
MutableRefInConst,
NullFnPtr,
NeverVal,
@@ -394,10 +391,8 @@ pub enum ValidationErrorKind<'tcx> {
UnsafeCell,
UninhabitedVal { ty: Ty<'tcx> },
InvalidEnumTag { value: String },
- UninitEnumTag,
- UninitStr,
+ UninhabitedEnumVariant,
Uninit { expected: ExpectedKind },
- UninitVal,
InvalidVTablePtr { value: String },
InvalidMetaSliceTooLarge { ptr_kind: PointerKind },
InvalidMetaTooLarge { ptr_kind: PointerKind },
@@ -425,12 +420,12 @@ pub enum UnsupportedOpInfo {
//
/// Overwriting parts of a pointer; without knowing absolute addresses, the resulting state
/// cannot be represented by the CTFE interpreter.
- PartialPointerOverwrite(Pointer<AllocId>),
- /// Attempting to `copy` parts of a pointer to somewhere else; without knowing absolute
+ OverwritePartialPointer(Pointer<AllocId>),
+ /// Attempting to read or copy parts of a pointer to somewhere else; without knowing absolute
/// addresses, the resulting state cannot be represented by the CTFE interpreter.
- PartialPointerCopy(Pointer<AllocId>),
- /// Encountered a pointer where we needed raw bytes.
- ReadPointerAsBytes,
+ ReadPartialPointer(Pointer<AllocId>),
+ /// Encountered a pointer where we needed an integer.
+ ReadPointerAsInt(Option<(AllocId, BadBytesAccess)>),
/// Accessing thread local statics
ThreadLocalStatic(DefId),
/// Accessing an unsupported extern static.
@@ -496,7 +491,7 @@ impl InterpError<'_> {
matches!(
self,
InterpError::Unsupported(UnsupportedOpInfo::Unsupported(_))
- | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Validation { .. })
+ | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ValidationError { .. })
| InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_))
)
}
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index 2d2cfee1b..3543158bf 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -138,15 +138,15 @@ use rustc_target::abi::{AddressSpace, Endian, HasDataLayout};
use crate::mir;
use crate::ty::codec::{TyDecoder, TyEncoder};
-use crate::ty::subst::GenericArgKind;
+use crate::ty::GenericArgKind;
use crate::ty::{self, Instance, Ty, TyCtxt};
pub use self::error::{
- struct_error, CheckInAllocMsg, ErrorHandled, EvalToAllocationRawResult, EvalToConstValueResult,
- EvalToValTreeResult, ExpectedKind, InterpError, InterpErrorInfo, InterpResult, InvalidMetaKind,
- InvalidProgramInfo, MachineStopType, PointerKind, ReportedErrorInfo, ResourceExhaustionInfo,
- ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess, UnsupportedOpInfo,
- ValidationErrorInfo, ValidationErrorKind,
+ struct_error, BadBytesAccess, CheckInAllocMsg, ErrorHandled, EvalToAllocationRawResult,
+ EvalToConstValueResult, EvalToValTreeResult, ExpectedKind, InterpError, InterpErrorInfo,
+ InterpResult, InvalidMetaKind, InvalidProgramInfo, MachineStopType, PointerKind,
+ ReportedErrorInfo, ResourceExhaustionInfo, ScalarSizeMismatch, UndefinedBehaviorInfo,
+ UnsupportedOpInfo, ValidationErrorInfo, ValidationErrorKind,
};
pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar};
@@ -176,7 +176,7 @@ impl<'tcx> GlobalId<'tcx> {
pub fn display(self, tcx: TyCtxt<'tcx>) -> String {
let instance_name = with_no_trimmed_paths!(tcx.def_path_str(self.instance.def.def_id()));
if let Some(promoted) = self.promoted {
- format!("{}::{:?}", instance_name, promoted)
+ format!("{instance_name}::{promoted:?}")
} else {
instance_name
}
@@ -274,7 +274,7 @@ pub struct AllocDecodingState {
// For each `AllocId`, we keep track of which decoding state it's currently in.
decoding_state: Vec<Lock<State>>,
// The offsets of each allocation in the data stream.
- data_offsets: Vec<u32>,
+ data_offsets: Vec<u64>,
}
impl AllocDecodingState {
@@ -289,7 +289,7 @@ impl AllocDecodingState {
AllocDecodingSession { state: self, session_id }
}
- pub fn new(data_offsets: Vec<u32>) -> Self {
+ pub fn new(data_offsets: Vec<u64>) -> Self {
let decoding_state =
std::iter::repeat_with(|| Lock::new(State::Empty)).take(data_offsets.len()).collect();
@@ -559,7 +559,7 @@ impl<'tcx> TyCtxt<'tcx> {
// However, formatting code relies on function identity (see #58320), so we only do
// this for generic functions. Lifetime parameters are ignored.
let is_generic = instance
- .substs
+ .args
.into_iter()
.any(|kind| !matches!(kind.unpack(), GenericArgKind::Lifetime(_)));
if is_generic {
@@ -609,7 +609,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Panics in case the `AllocId` is dangling. Since that is impossible for `AllocId`s in
/// constants (as all constants must pass interning and validation that check for dangling
/// ids), this function is frequently used throughout rustc, but should not be used within
- /// the miri engine.
+ /// the interpreter.
pub fn global_alloc(self, id: AllocId) -> GlobalAlloc<'tcx> {
match self.try_get_global_alloc(id) {
Some(alloc) => alloc,
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
index 9c97431f3..fc659ce18 100644
--- a/compiler/rustc_middle/src/mir/interpret/queries.rs
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -2,8 +2,8 @@ use super::{ErrorHandled, EvalToConstValueResult, EvalToValTreeResult, GlobalId}
use crate::mir;
use crate::query::{TyCtxtAt, TyCtxtEnsure};
-use crate::ty::subst::InternalSubsts;
use crate::ty::visit::TypeVisitableExt;
+use crate::ty::GenericArgs;
use crate::ty::{self, TyCtxt};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
@@ -20,8 +20,8 @@ impl<'tcx> TyCtxt<'tcx> {
// to be used. So we can't use `Instance::mono`, instead we feed unresolved substitutions
// into `const_eval` which will return `ErrorHandled::ToGeneric` if any of them are
// encountered.
- let substs = InternalSubsts::identity_for_item(self, def_id);
- let instance = ty::Instance::new(def_id, substs);
+ let args = GenericArgs::identity_for_item(self, def_id);
+ let instance = ty::Instance::new(def_id, args);
let cid = GlobalId { instance, promoted: None };
let param_env = self.param_env(def_id).with_reveal_all_normalized(self);
self.const_eval_global_id(param_env, cid, None)
@@ -48,14 +48,14 @@ impl<'tcx> TyCtxt<'tcx> {
//
// When trying to evaluate constants containing inference variables,
// use `Infcx::const_eval_resolve` instead.
- if ct.substs.has_non_region_infer() {
+ if ct.args.has_non_region_infer() {
bug!("did not expect inference variables here");
}
match ty::Instance::resolve(
self, param_env,
// FIXME: maybe have a separate version for resolving mir::UnevaluatedConst?
- ct.def, ct.substs,
+ ct.def, ct.args,
) {
Ok(Some(instance)) => {
let cid = GlobalId { instance, promoted: ct.promoted };
@@ -79,11 +79,11 @@ impl<'tcx> TyCtxt<'tcx> {
//
// When trying to evaluate constants containing inference variables,
// use `Infcx::const_eval_resolve` instead.
- if ct.substs.has_non_region_infer() {
+ if ct.args.has_non_region_infer() {
bug!("did not expect inference variables here");
}
- match ty::Instance::resolve(self, param_env, ct.def, ct.substs) {
+ match ty::Instance::resolve(self, param_env, ct.def, ct.args) {
Ok(Some(instance)) => {
let cid = GlobalId { instance, promoted: None };
self.const_eval_global_id_for_typeck(param_env, cid, span).inspect(|_| {
@@ -94,7 +94,7 @@ impl<'tcx> TyCtxt<'tcx> {
// @lcnr believes that successfully evaluating even though there are
// used generic parameters is a bug of evaluation, so checking for it
// here does feel somewhat sensible.
- if !self.features().generic_const_exprs && ct.substs.has_non_region_param() {
+ if !self.features().generic_const_exprs && ct.args.has_non_region_param() {
let def_kind = self.def_kind(instance.def_id());
assert!(
matches!(
@@ -139,7 +139,6 @@ impl<'tcx> TyCtxt<'tcx> {
cid: GlobalId<'tcx>,
span: Option<Span>,
) -> EvalToConstValueResult<'tcx> {
- let param_env = param_env.with_const();
// Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
// improve caching of queries.
let inputs = self.erase_regions(param_env.and(cid));
@@ -158,8 +157,6 @@ impl<'tcx> TyCtxt<'tcx> {
cid: GlobalId<'tcx>,
span: Option<Span>,
) -> EvalToValTreeResult<'tcx> {
- let param_env = param_env.with_const();
- debug!(?param_env);
// Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
// improve caching of queries.
let inputs = self.erase_regions(param_env.and(cid));
@@ -204,7 +201,6 @@ impl<'tcx> TyCtxtAt<'tcx> {
gid: GlobalId<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Result<mir::ConstAllocation<'tcx>, ErrorHandled> {
- let param_env = param_env.with_const();
trace!("eval_to_allocation: Need to compute {:?}", gid);
let raw_const = self.eval_to_allocation_raw(param_env.and(gid))?;
Ok(self.global_alloc(raw_const.alloc_id).unwrap_memory())
@@ -221,11 +217,10 @@ impl<'tcx> TyCtxtEnsure<'tcx> {
// to be used. So we can't use `Instance::mono`, instead we feed unresolved substitutions
// into `const_eval` which will return `ErrorHandled::ToGeneric` if any of them are
// encountered.
- let substs = InternalSubsts::identity_for_item(self.tcx, def_id);
- let instance = ty::Instance::new(def_id, substs);
+ let args = GenericArgs::identity_for_item(self.tcx, def_id);
+ let instance = ty::Instance::new(def_id, args);
let cid = GlobalId { instance, promoted: None };
- let param_env =
- self.tcx.param_env(def_id).with_reveal_all_normalized(self.tcx).with_const();
+ let param_env = self.tcx.param_env(def_id).with_reveal_all_normalized(self.tcx);
// Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
// improve caching of queries.
let inputs = self.tcx.erase_regions(param_env.and(cid));
@@ -238,7 +233,7 @@ impl<'tcx> TyCtxtEnsure<'tcx> {
assert!(self.tcx.is_static(def_id));
let instance = ty::Instance::mono(self.tcx, def_id);
let gid = GlobalId { instance, promoted: None };
- let param_env = ty::ParamEnv::reveal_all().with_const();
+ let param_env = ty::ParamEnv::reveal_all();
trace!("eval_to_allocation: Need to compute {:?}", gid);
self.eval_to_allocation_raw(param_env.and(gid))
}
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 0416411df..5345a6588 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -135,8 +135,8 @@ static_assert_size!(Scalar, 24);
impl<Prov: Provenance> fmt::Debug for Scalar<Prov> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
- Scalar::Ptr(ptr, _size) => write!(f, "{:?}", ptr),
- Scalar::Int(int) => write!(f, "{:?}", int),
+ Scalar::Ptr(ptr, _size) => write!(f, "{ptr:?}"),
+ Scalar::Int(int) => write!(f, "{int:?}"),
}
}
}
@@ -144,8 +144,8 @@ impl<Prov: Provenance> fmt::Debug for Scalar<Prov> {
impl<Prov: Provenance> fmt::Display for Scalar<Prov> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
- Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
- Scalar::Int(int) => write!(f, "{}", int),
+ Scalar::Ptr(ptr, _size) => write!(f, "pointer to {ptr:?}"),
+ Scalar::Int(int) => write!(f, "{int}"),
}
}
}
@@ -153,8 +153,8 @@ impl<Prov: Provenance> fmt::Display for Scalar<Prov> {
impl<Prov: Provenance> fmt::LowerHex for Scalar<Prov> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
- Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
- Scalar::Int(int) => write!(f, "{:#x}", int),
+ Scalar::Ptr(ptr, _size) => write!(f, "pointer to {ptr:?}"),
+ Scalar::Int(int) => write!(f, "{int:#x}"),
}
}
}
@@ -320,6 +320,14 @@ impl<Prov> Scalar<Prov> {
}
})
}
+
+ #[inline]
+ pub fn size(self) -> Size {
+ match self {
+ Scalar::Int(int) => int.size(),
+ Scalar::Ptr(_ptr, sz) => Size::from_bytes(sz),
+ }
+ }
}
impl<'tcx, Prov: Provenance> Scalar<Prov> {
@@ -370,15 +378,16 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
#[inline]
pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
- self.try_to_int().map_err(|_| err_unsup!(ReadPointerAsBytes))?.to_bits(target_size).map_err(
- |size| {
+ self.try_to_int()
+ .map_err(|_| err_unsup!(ReadPointerAsInt(None)))?
+ .to_bits(target_size)
+ .map_err(|size| {
err_ub!(ScalarSizeMismatch(ScalarSizeMismatch {
target_size: target_size.bytes(),
data_size: size.bytes(),
}))
.into()
- },
- )
+ })
}
#[inline(always)]
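
The value.rs hunks above make two kinds of change: the formatting impls move from positional arguments to inline (captured) format arguments, and a provenance-independent `Scalar::size` accessor is added. The sketch below is a standalone toy (its `Scalar` is not rustc's `Scalar<Prov>` and carries explicit sizes instead of provenance) showing both ideas in a form that compiles on its own:

    use std::fmt;

    // Toy stand-in for a scalar value: either an integer with a byte size, or a
    // "pointer" (here just an address) with a pointer size.
    #[derive(Copy, Clone)]
    enum Scalar {
        Int(u128, u64), // value, size in bytes
        Ptr(usize, u8), // address, pointer size in bytes
    }

    impl Scalar {
        // Analogue of the newly added `Scalar::size`: report the size regardless
        // of which variant we have.
        fn size(self) -> u64 {
            match self {
                Scalar::Int(_, sz) => sz,
                Scalar::Ptr(_, sz) => u64::from(sz),
            }
        }
    }

    impl fmt::Display for Scalar {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match self {
                // Inline format arguments: `{addr:#x}` instead of `"{:#x}", addr`.
                Scalar::Ptr(addr, _sz) => write!(f, "pointer to {addr:#x}"),
                Scalar::Int(int, _sz) => write!(f, "{int}"),
            }
        }
    }

    fn main() {
        let p = Scalar::Ptr(0x4000, 8);
        let i = Scalar::Int(42, 4);
        println!("{p} ({} bytes), {i} ({} bytes)", p.size(), i.size());
    }
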
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 28c505878..9ef3a1b30 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -12,7 +12,7 @@ use crate::ty::print::{FmtPrinter, Printer};
use crate::ty::visit::TypeVisitableExt;
use crate::ty::{self, List, Ty, TyCtxt};
use crate::ty::{AdtDef, InstanceDef, ScalarInt, UserTypeAnnotationIndex};
-use crate::ty::{GenericArg, InternalSubsts, SubstsRef};
+use crate::ty::{GenericArg, GenericArgs, GenericArgsRef};
use rustc_data_structures::captures::Captures;
use rustc_errors::{DiagnosticArgValue, DiagnosticMessage, ErrorGuaranteed, IntoDiagnosticArg};
@@ -619,7 +619,7 @@ impl<D: TyDecoder, T: Decodable<D>> Decodable<D> for ClearCrossCrate<T> {
let val = T::decode(d);
ClearCrossCrate::Set(val)
}
- tag => panic!("Invalid tag for ClearCrossCrate: {:?}", tag),
+ tag => panic!("Invalid tag for ClearCrossCrate: {tag:?}"),
}
}
}
@@ -706,9 +706,7 @@ pub enum BindingForm<'tcx> {
RefForGuard,
}
-TrivialTypeTraversalAndLiftImpls! {
- BindingForm<'tcx>,
-}
+TrivialTypeTraversalAndLiftImpls! { BindingForm<'tcx> }
mod binding_form_impl {
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@@ -1048,12 +1046,12 @@ pub enum VarDebugInfoContents<'tcx> {
impl<'tcx> Debug for VarDebugInfoContents<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
match self {
- VarDebugInfoContents::Const(c) => write!(fmt, "{}", c),
- VarDebugInfoContents::Place(p) => write!(fmt, "{:?}", p),
+ VarDebugInfoContents::Const(c) => write!(fmt, "{c}"),
+ VarDebugInfoContents::Place(p) => write!(fmt, "{p:?}"),
VarDebugInfoContents::Composite { ty, fragments } => {
- write!(fmt, "{:?}{{ ", ty)?;
+ write!(fmt, "{ty:?}{{ ")?;
for f in fragments.iter() {
- write!(fmt, "{:?}, ", f)?;
+ write!(fmt, "{f:?}, ")?;
}
write!(fmt, "}}")
}
@@ -1111,10 +1109,6 @@ pub struct VarDebugInfo<'tcx> {
/// originated from (starting from 1). Note, if MIR inlining is enabled, then this is the
/// argument number in the original function before it was inlined.
pub argument_index: Option<u16>,
-
- /// The data represents `name` dereferenced `references` times,
- /// and not the direct value.
- pub references: u8,
}
///////////////////////////////////////////////////////////////////////////
@@ -1317,55 +1311,47 @@ impl<O> AssertKind<O> {
match self {
BoundsCheck { ref len, ref index } => write!(
f,
- "\"index out of bounds: the length is {{}} but the index is {{}}\", {:?}, {:?}",
- len, index
+ "\"index out of bounds: the length is {{}} but the index is {{}}\", {len:?}, {index:?}"
),
OverflowNeg(op) => {
- write!(f, "\"attempt to negate `{{}}`, which would overflow\", {:?}", op)
+ write!(f, "\"attempt to negate `{{}}`, which would overflow\", {op:?}")
}
- DivisionByZero(op) => write!(f, "\"attempt to divide `{{}}` by zero\", {:?}", op),
+ DivisionByZero(op) => write!(f, "\"attempt to divide `{{}}` by zero\", {op:?}"),
RemainderByZero(op) => write!(
f,
- "\"attempt to calculate the remainder of `{{}}` with a divisor of zero\", {:?}",
- op
+ "\"attempt to calculate the remainder of `{{}}` with a divisor of zero\", {op:?}"
),
Overflow(BinOp::Add, l, r) => write!(
f,
- "\"attempt to compute `{{}} + {{}}`, which would overflow\", {:?}, {:?}",
- l, r
+ "\"attempt to compute `{{}} + {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Sub, l, r) => write!(
f,
- "\"attempt to compute `{{}} - {{}}`, which would overflow\", {:?}, {:?}",
- l, r
+ "\"attempt to compute `{{}} - {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Mul, l, r) => write!(
f,
- "\"attempt to compute `{{}} * {{}}`, which would overflow\", {:?}, {:?}",
- l, r
+ "\"attempt to compute `{{}} * {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Div, l, r) => write!(
f,
- "\"attempt to compute `{{}} / {{}}`, which would overflow\", {:?}, {:?}",
- l, r
+ "\"attempt to compute `{{}} / {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Rem, l, r) => write!(
f,
- "\"attempt to compute the remainder of `{{}} % {{}}`, which would overflow\", {:?}, {:?}",
- l, r
+ "\"attempt to compute the remainder of `{{}} % {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Shr, _, r) => {
- write!(f, "\"attempt to shift right by `{{}}`, which would overflow\", {:?}", r)
+ write!(f, "\"attempt to shift right by `{{}}`, which would overflow\", {r:?}")
}
Overflow(BinOp::Shl, _, r) => {
- write!(f, "\"attempt to shift left by `{{}}`, which would overflow\", {:?}", r)
+ write!(f, "\"attempt to shift left by `{{}}`, which would overflow\", {r:?}")
}
MisalignedPointerDereference { required, found } => {
write!(
f,
- "\"misaligned pointer dereference: address must be a multiple of {{}} but is {{}}\", {:?}, {:?}",
- required, found
+ "\"misaligned pointer dereference: address must be a multiple of {{}} but is {{}}\", {required:?}, {found:?}"
)
}
_ => write!(f, "\"{}\"", self.description()),
@@ -1461,9 +1447,9 @@ impl Debug for Statement<'_> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
use self::StatementKind::*;
match self.kind {
- Assign(box (ref place, ref rv)) => write!(fmt, "{:?} = {:?}", place, rv),
+ Assign(box (ref place, ref rv)) => write!(fmt, "{place:?} = {rv:?}"),
FakeRead(box (ref cause, ref place)) => {
- write!(fmt, "FakeRead({:?}, {:?})", cause, place)
+ write!(fmt, "FakeRead({cause:?}, {place:?})")
}
Retag(ref kind, ref place) => write!(
fmt,
@@ -1476,20 +1462,20 @@ impl Debug for Statement<'_> {
},
place,
),
- StorageLive(ref place) => write!(fmt, "StorageLive({:?})", place),
- StorageDead(ref place) => write!(fmt, "StorageDead({:?})", place),
+ StorageLive(ref place) => write!(fmt, "StorageLive({place:?})"),
+ StorageDead(ref place) => write!(fmt, "StorageDead({place:?})"),
SetDiscriminant { ref place, variant_index } => {
- write!(fmt, "discriminant({:?}) = {:?}", place, variant_index)
+ write!(fmt, "discriminant({place:?}) = {variant_index:?}")
}
- Deinit(ref place) => write!(fmt, "Deinit({:?})", place),
+ Deinit(ref place) => write!(fmt, "Deinit({place:?})"),
PlaceMention(ref place) => {
- write!(fmt, "PlaceMention({:?})", place)
+ write!(fmt, "PlaceMention({place:?})")
}
AscribeUserType(box (ref place, ref c_ty), ref variance) => {
- write!(fmt, "AscribeUserType({:?}, {:?}, {:?})", place, variance, c_ty)
+ write!(fmt, "AscribeUserType({place:?}, {variance:?}, {c_ty:?})")
}
Coverage(box self::Coverage { ref kind, code_region: Some(ref rgn) }) => {
- write!(fmt, "Coverage::{:?} for {:?}", kind, rgn)
+ write!(fmt, "Coverage::{kind:?} for {rgn:?}")
}
Coverage(box ref coverage) => write!(fmt, "Coverage::{:?}", coverage.kind),
Intrinsic(box ref intrinsic) => write!(fmt, "{intrinsic}"),
@@ -1602,14 +1588,13 @@ impl<'tcx> Place<'tcx> {
self.projection.iter().any(|elem| elem.is_indirect())
}
- /// If MirPhase >= Derefered and if projection contains Deref,
- /// It's guaranteed to be in the first place
- pub fn has_deref(&self) -> bool {
- // To make sure this is not accidentally used in wrong mir phase
- debug_assert!(
- self.projection.is_empty() || !self.projection[1..].contains(&PlaceElem::Deref)
- );
- self.projection.first() == Some(&PlaceElem::Deref)
+ /// Returns `true` if this `Place`'s first projection is `Deref`.
+ ///
+ /// This is useful because for MIR phases `AnalysisPhase::PostCleanup` and later,
+ /// `Deref` projections can only occur as the first projection. In that case this method
+ /// is equivalent to `is_indirect`, but faster.
+ pub fn is_indirect_first_projection(&self) -> bool {
+ self.as_ref().is_indirect_first_projection()
}
/// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
@@ -1682,9 +1667,16 @@ impl<'tcx> PlaceRef<'tcx> {
self.projection.iter().any(|elem| elem.is_indirect())
}
- /// If MirPhase >= Derefered and if projection contains Deref,
- /// It's guaranteed to be in the first place
- pub fn has_deref(&self) -> bool {
+ /// Returns `true` if this `Place`'s first projection is `Deref`.
+ ///
+ /// This is useful because for MIR phases `AnalysisPhase::PostCleanup` and later,
+ /// `Deref` projections can only occur as the first projection. In that case this method
+ /// is equivalent to `is_indirect`, but faster.
+ pub fn is_indirect_first_projection(&self) -> bool {
+ // To make sure this is not accidentally used in wrong mir phase
+ debug_assert!(
+ self.projection.is_empty() || !self.projection[1..].contains(&PlaceElem::Deref)
+ );
self.projection.first() == Some(&PlaceElem::Deref)
}
@@ -1769,13 +1761,13 @@ impl Debug for Place<'_> {
for elem in self.projection.iter() {
match elem {
ProjectionElem::OpaqueCast(ty) => {
- write!(fmt, " as {})", ty)?;
+ write!(fmt, " as {ty})")?;
}
ProjectionElem::Downcast(Some(name), _index) => {
- write!(fmt, " as {})", name)?;
+ write!(fmt, " as {name})")?;
}
ProjectionElem::Downcast(None, index) => {
- write!(fmt, " as variant#{:?})", index)?;
+ write!(fmt, " as variant#{index:?})")?;
}
ProjectionElem::Deref => {
write!(fmt, ")")?;
@@ -1784,25 +1776,25 @@ impl Debug for Place<'_> {
write!(fmt, ".{:?}: {:?})", field.index(), ty)?;
}
ProjectionElem::Index(ref index) => {
- write!(fmt, "[{:?}]", index)?;
+ write!(fmt, "[{index:?}]")?;
}
ProjectionElem::ConstantIndex { offset, min_length, from_end: false } => {
- write!(fmt, "[{:?} of {:?}]", offset, min_length)?;
+ write!(fmt, "[{offset:?} of {min_length:?}]")?;
}
ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => {
- write!(fmt, "[-{:?} of {:?}]", offset, min_length)?;
+ write!(fmt, "[-{offset:?} of {min_length:?}]")?;
}
ProjectionElem::Subslice { from, to, from_end: true } if to == 0 => {
- write!(fmt, "[{:?}:]", from)?;
+ write!(fmt, "[{from:?}:]")?;
}
ProjectionElem::Subslice { from, to, from_end: true } if from == 0 => {
- write!(fmt, "[:-{:?}]", to)?;
+ write!(fmt, "[:-{to:?}]")?;
}
ProjectionElem::Subslice { from, to, from_end: true } => {
- write!(fmt, "[{:?}:-{:?}]", from, to)?;
+ write!(fmt, "[{from:?}:-{to:?}]")?;
}
ProjectionElem::Subslice { from, to, from_end: false } => {
- write!(fmt, "[{:?}..{:?}]", from, to)?;
+ write!(fmt, "[{from:?}..{to:?}]")?;
}
}
}
@@ -1896,24 +1888,24 @@ impl<'tcx> Debug for Operand<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
use self::Operand::*;
match *self {
- Constant(ref a) => write!(fmt, "{:?}", a),
- Copy(ref place) => write!(fmt, "{:?}", place),
- Move(ref place) => write!(fmt, "move {:?}", place),
+ Constant(ref a) => write!(fmt, "{a:?}"),
+ Copy(ref place) => write!(fmt, "{place:?}"),
+ Move(ref place) => write!(fmt, "move {place:?}"),
}
}
}
impl<'tcx> Operand<'tcx> {
/// Convenience helper to make a constant that refers to the fn
- /// with given `DefId` and substs. Since this is used to synthesize
+ /// with given `DefId` and args. Since this is used to synthesize
/// MIR, assumes `user_ty` is None.
pub fn function_handle(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- substs: impl IntoIterator<Item = GenericArg<'tcx>>,
+ args: impl IntoIterator<Item = GenericArg<'tcx>>,
span: Span,
) -> Self {
- let ty = Ty::new_fn_def(tcx, def_id, substs);
+ let ty = Ty::new_fn_def(tcx, def_id, args);
Operand::Constant(Box::new(Constant {
span,
user_ty: None,
@@ -1937,11 +1929,11 @@ impl<'tcx> Operand<'tcx> {
let param_env_and_ty = ty::ParamEnv::empty().and(ty);
let type_size = tcx
.layout_of(param_env_and_ty)
- .unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
+ .unwrap_or_else(|e| panic!("could not compute layout for {ty:?}: {e:?}"))
.size;
let scalar_size = match val {
Scalar::Int(int) => int.size(),
- _ => panic!("Invalid scalar type {:?}", val),
+ _ => panic!("Invalid scalar type {val:?}"),
};
scalar_size == type_size
});
@@ -1981,9 +1973,9 @@ impl<'tcx> Operand<'tcx> {
///
/// While this is unlikely in general, it's the normal case of what you'll
/// find as the `func` in a [`TerminatorKind::Call`].
- pub fn const_fn_def(&self) -> Option<(DefId, SubstsRef<'tcx>)> {
+ pub fn const_fn_def(&self) -> Option<(DefId, GenericArgsRef<'tcx>)> {
let const_ty = self.constant()?.literal.ty();
- if let ty::FnDef(def_id, substs) = *const_ty.kind() { Some((def_id, substs)) } else { None }
+ if let ty::FnDef(def_id, args) = *const_ty.kind() { Some((def_id, args)) } else { None }
}
}
@@ -2057,26 +2049,26 @@ impl<'tcx> Debug for Rvalue<'tcx> {
use self::Rvalue::*;
match *self {
- Use(ref place) => write!(fmt, "{:?}", place),
+ Use(ref place) => write!(fmt, "{place:?}"),
Repeat(ref a, b) => {
- write!(fmt, "[{:?}; ", a)?;
+ write!(fmt, "[{a:?}; ")?;
pretty_print_const(b, fmt, false)?;
write!(fmt, "]")
}
- Len(ref a) => write!(fmt, "Len({:?})", a),
+ Len(ref a) => write!(fmt, "Len({a:?})"),
Cast(ref kind, ref place, ref ty) => {
- write!(fmt, "{:?} as {:?} ({:?})", place, ty, kind)
+ write!(fmt, "{place:?} as {ty:?} ({kind:?})")
}
- BinaryOp(ref op, box (ref a, ref b)) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
+ BinaryOp(ref op, box (ref a, ref b)) => write!(fmt, "{op:?}({a:?}, {b:?})"),
CheckedBinaryOp(ref op, box (ref a, ref b)) => {
- write!(fmt, "Checked{:?}({:?}, {:?})", op, a, b)
+ write!(fmt, "Checked{op:?}({a:?}, {b:?})")
}
- UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a),
- Discriminant(ref place) => write!(fmt, "discriminant({:?})", place),
+ UnaryOp(ref op, ref a) => write!(fmt, "{op:?}({a:?})"),
+ Discriminant(ref place) => write!(fmt, "discriminant({place:?})"),
NullaryOp(ref op, ref t) => match op {
- NullOp::SizeOf => write!(fmt, "SizeOf({:?})", t),
- NullOp::AlignOf => write!(fmt, "AlignOf({:?})", t),
- NullOp::OffsetOf(fields) => write!(fmt, "OffsetOf({:?}, {:?})", t, fields),
+ NullOp::SizeOf => write!(fmt, "SizeOf({t:?})"),
+ NullOp::AlignOf => write!(fmt, "AlignOf({t:?})"),
+ NullOp::OffsetOf(fields) => write!(fmt, "OffsetOf({t:?}, {fields:?})"),
},
ThreadLocalRef(did) => ty::tls::with(|tcx| {
let muta = tcx.static_mutability(did).unwrap().prefix_str();
@@ -2103,10 +2095,10 @@ impl<'tcx> Debug for Rvalue<'tcx> {
// Do not even print 'static
String::new()
};
- write!(fmt, "&{}{}{:?}", region, kind_str, place)
+ write!(fmt, "&{region}{kind_str}{place:?}")
}
- CopyForDeref(ref place) => write!(fmt, "deref_copy {:#?}", place),
+ CopyForDeref(ref place) => write!(fmt, "deref_copy {place:#?}"),
AddressOf(mutability, ref place) => {
let kind_str = match mutability {
@@ -2114,7 +2106,7 @@ impl<'tcx> Debug for Rvalue<'tcx> {
Mutability::Not => "const",
};
- write!(fmt, "&raw {} {:?}", kind_str, place)
+ write!(fmt, "&raw {kind_str} {place:?}")
}
Aggregate(ref kind, ref places) => {
@@ -2127,7 +2119,7 @@ impl<'tcx> Debug for Rvalue<'tcx> {
};
match **kind {
- AggregateKind::Array(_) => write!(fmt, "{:?}", places),
+ AggregateKind::Array(_) => write!(fmt, "{places:?}"),
AggregateKind::Tuple => {
if places.is_empty() {
@@ -2137,12 +2129,12 @@ impl<'tcx> Debug for Rvalue<'tcx> {
}
}
- AggregateKind::Adt(adt_did, variant, substs, _user_ty, _) => {
+ AggregateKind::Adt(adt_did, variant, args, _user_ty, _) => {
ty::tls::with(|tcx| {
let variant_def = &tcx.adt_def(adt_did).variant(variant);
- let substs = tcx.lift(substs).expect("could not lift for printing");
+ let args = tcx.lift(args).expect("could not lift for printing");
let name = FmtPrinter::new(tcx, Namespace::ValueNS)
- .print_def_path(variant_def.def_id, substs)?
+ .print_def_path(variant_def.def_id, args)?
.into_buffer();
match variant_def.ctor_kind() {
@@ -2159,10 +2151,10 @@ impl<'tcx> Debug for Rvalue<'tcx> {
})
}
- AggregateKind::Closure(def_id, substs) => ty::tls::with(|tcx| {
+ AggregateKind::Closure(def_id, args) => ty::tls::with(|tcx| {
let name = if tcx.sess.opts.unstable_opts.span_free_formats {
- let substs = tcx.lift(substs).unwrap();
- format!("[closure@{}]", tcx.def_path_str_with_substs(def_id, substs),)
+ let args = tcx.lift(args).unwrap();
+ format!("[closure@{}]", tcx.def_path_str_with_args(def_id, args),)
} else {
let span = tcx.def_span(def_id);
format!(
@@ -2213,7 +2205,7 @@ impl<'tcx> Debug for Rvalue<'tcx> {
}
ShallowInitBox(ref place, ref ty) => {
- write!(fmt, "ShallowInitBox({:?}, {:?})", place, ty)
+ write!(fmt, "ShallowInitBox({place:?}, {ty:?})")
}
}
}
@@ -2493,7 +2485,7 @@ impl<'tcx> ConstantKind<'tcx> {
};
debug!("expr.kind: {:?}", expr.kind);
- let ty = tcx.type_of(def).subst_identity();
+ let ty = tcx.type_of(def).instantiate_identity();
debug!(?ty);
// FIXME(const_generics): We currently have to special case parameters because `min_const_generics`
@@ -2521,23 +2513,22 @@ impl<'tcx> ConstantKind<'tcx> {
}
let hir_id = tcx.hir().local_def_id_to_hir_id(def);
- let parent_substs = if let Some(parent_hir_id) = tcx.hir().opt_parent_id(hir_id)
+ let parent_args = if let Some(parent_hir_id) = tcx.hir().opt_parent_id(hir_id)
&& let Some(parent_did) = parent_hir_id.as_owner()
{
- InternalSubsts::identity_for_item(tcx, parent_did)
+ GenericArgs::identity_for_item(tcx, parent_did)
} else {
List::empty()
};
- debug!(?parent_substs);
+ debug!(?parent_args);
let did = def.to_def_id();
- let child_substs = InternalSubsts::identity_for_item(tcx, did);
- let substs =
- tcx.mk_substs_from_iter(parent_substs.into_iter().chain(child_substs.into_iter()));
- debug!(?substs);
+ let child_args = GenericArgs::identity_for_item(tcx, did);
+ let args = tcx.mk_args_from_iter(parent_args.into_iter().chain(child_args.into_iter()));
+ debug!(?args);
let span = tcx.def_span(def);
- let uneval = UnevaluatedConst::new(did, substs);
+ let uneval = UnevaluatedConst::new(did, args);
debug!(?span, ?param_env);
match tcx.const_eval_resolve(param_env, uneval, Some(span)) {
@@ -2552,7 +2543,7 @@ impl<'tcx> ConstantKind<'tcx> {
Self::Unevaluated(
UnevaluatedConst {
def: did,
- substs: InternalSubsts::identity_for_item(tcx, did),
+ args: GenericArgs::identity_for_item(tcx, did),
promoted: None,
},
ty,
@@ -2578,7 +2569,7 @@ impl<'tcx> ConstantKind<'tcx> {
#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
pub struct UnevaluatedConst<'tcx> {
pub def: DefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
pub promoted: Option<Promoted>,
}
@@ -2586,14 +2577,14 @@ impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
pub fn shrink(self) -> ty::UnevaluatedConst<'tcx> {
assert_eq!(self.promoted, None);
- ty::UnevaluatedConst { def: self.def, substs: self.substs }
+ ty::UnevaluatedConst { def: self.def, args: self.args }
}
}
impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
- pub fn new(def: DefId, substs: SubstsRef<'tcx>) -> UnevaluatedConst<'tcx> {
- UnevaluatedConst { def, substs, promoted: Default::default() }
+ pub fn new(def: DefId, args: GenericArgsRef<'tcx>) -> UnevaluatedConst<'tcx> {
+ UnevaluatedConst { def, args, promoted: Default::default() }
}
}
@@ -2758,7 +2749,7 @@ rustc_index::newtype_index! {
impl<'tcx> Debug for Constant<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- write!(fmt, "{}", self)
+ write!(fmt, "{self}")
}
}
@@ -2834,7 +2825,7 @@ fn pretty_print_const_value<'tcx>(
let ty = tcx.lift(ty).unwrap();
if tcx.sess.verbose() {
- fmt.write_str(&format!("ConstValue({:?}: {})", ct, ty))?;
+ fmt.write_str(&format!("ConstValue({ct:?}: {ty})"))?;
return Ok(());
}
@@ -2904,17 +2895,17 @@ fn pretty_print_const_value<'tcx>(
fmt.write_str(")")?;
}
ty::Adt(def, _) if def.variants().is_empty() => {
- fmt.write_str(&format!("{{unreachable(): {}}}", ty))?;
+ fmt.write_str(&format!("{{unreachable(): {ty}}}"))?;
}
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
let variant_idx = contents
.variant
.expect("destructed mir constant of adt without variant idx");
let variant_def = &def.variant(variant_idx);
- let substs = tcx.lift(substs).unwrap();
+ let args = tcx.lift(args).unwrap();
let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
cx.print_alloc_ids = true;
- let cx = cx.print_value_path(variant_def.def_id, substs)?;
+ let cx = cx.print_value_path(variant_def.def_id, args)?;
fmt.write_str(&cx.into_buffer())?;
match variant_def.ctor_kind() {
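
Among the mod.rs changes above, `has_deref` becomes `is_indirect_first_projection`, documented as equivalent to `is_indirect` once `Deref` can only appear as the first projection (MIR phases from `AnalysisPhase::PostCleanup` on), but as a constant-time check instead of a scan. A minimal sketch with placeholder types (not rustc's `Place`/`PlaceRef`) of why the two checks agree under that invariant:

    #[derive(Clone, Copy, PartialEq)]
    enum PlaceElem {
        Deref,
        Field,
    }

    struct Place {
        projection: Vec<PlaceElem>,
    }

    impl Place {
        // Analogue of `is_indirect`: any `Deref` anywhere in the projection.
        fn is_indirect(&self) -> bool {
            self.projection.iter().any(|e| matches!(e, PlaceElem::Deref))
        }

        // Analogue of `is_indirect_first_projection`: only meaningful once derefs
        // are guaranteed to be leading; then it agrees with `is_indirect` while
        // looking at a single element.
        fn is_indirect_first_projection(&self) -> bool {
            debug_assert!(
                self.projection.is_empty()
                    || !self.projection[1..].contains(&PlaceElem::Deref)
            );
            self.projection.first() == Some(&PlaceElem::Deref)
        }
    }

    fn main() {
        let p = Place { projection: vec![PlaceElem::Deref, PlaceElem::Field] };
        assert!(p.is_indirect());
        assert_eq!(p.is_indirect(), p.is_indirect_first_projection());
    }
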
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
index ca735d523..8fd980d5a 100644
--- a/compiler/rustc_middle/src/mir/mono.rs
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -1,5 +1,5 @@
use crate::dep_graph::{DepNode, WorkProduct, WorkProductId};
-use crate::ty::{subst::InternalSubsts, Instance, InstanceDef, SymbolName, TyCtxt};
+use crate::ty::{GenericArgs, Instance, InstanceDef, SymbolName, TyCtxt};
use rustc_attr::InlineAttr;
use rustc_data_structures::base_n;
use rustc_data_structures::fingerprint::Fingerprint;
@@ -56,22 +56,31 @@ impl<'tcx> MonoItem<'tcx> {
}
}
+ // Note: if you change how item size estimates work, you might need to
+ // change NON_INCR_MIN_CGU_SIZE as well.
pub fn size_estimate(&self, tcx: TyCtxt<'tcx>) -> usize {
match *self {
MonoItem::Fn(instance) => {
- // Estimate the size of a function based on how many statements
- // it contains.
- tcx.instance_def_size_estimate(instance.def)
+ match instance.def {
+ // "Normal" functions size estimate: the number of
+ // statements, plus one for the terminator.
+ InstanceDef::Item(..) | InstanceDef::DropGlue(..) => {
+ let mir = tcx.instance_mir(instance.def);
+ mir.basic_blocks.iter().map(|bb| bb.statements.len() + 1).sum()
+ }
+ // Other compiler-generated shims size estimate: 1
+ _ => 1,
+ }
}
- // Conservatively estimate the size of a static declaration
- // or assembly to be 1.
+ // Conservatively estimate the size of a static declaration or
+ // assembly item to be 1.
MonoItem::Static(_) | MonoItem::GlobalAsm(_) => 1,
}
}
pub fn is_generic_fn(&self) -> bool {
match *self {
- MonoItem::Fn(ref instance) => instance.substs.non_erasable_generics().next().is_some(),
+ MonoItem::Fn(ref instance) => instance.args.non_erasable_generics().next().is_some(),
MonoItem::Static(..) | MonoItem::GlobalAsm(..) => false,
}
}
@@ -168,14 +177,14 @@ impl<'tcx> MonoItem<'tcx> {
/// which will never be accessed) in its place.
pub fn is_instantiable(&self, tcx: TyCtxt<'tcx>) -> bool {
debug!("is_instantiable({:?})", self);
- let (def_id, substs) = match *self {
- MonoItem::Fn(ref instance) => (instance.def_id(), instance.substs),
- MonoItem::Static(def_id) => (def_id, InternalSubsts::empty()),
+ let (def_id, args) = match *self {
+ MonoItem::Fn(ref instance) => (instance.def_id(), instance.args),
+ MonoItem::Static(def_id) => (def_id, GenericArgs::empty()),
// global asm never has predicates
MonoItem::GlobalAsm(..) => return true,
};
- !tcx.subst_and_check_impossible_predicates((def_id, &substs))
+ !tcx.subst_and_check_impossible_predicates((def_id, &args))
}
pub fn local_span(&self, tcx: TyCtxt<'tcx>) -> Option<Span> {
@@ -214,9 +223,9 @@ impl<'tcx> MonoItem<'tcx> {
impl<'tcx> fmt::Display for MonoItem<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
- MonoItem::Fn(instance) => write!(f, "fn {}", instance),
+ MonoItem::Fn(instance) => write!(f, "fn {instance}"),
MonoItem::Static(def_id) => {
- write!(f, "static {}", Instance::new(def_id, InternalSubsts::empty()))
+ write!(f, "static {}", Instance::new(def_id, GenericArgs::empty()))
}
MonoItem::GlobalAsm(..) => write!(f, "global_asm"),
}
@@ -230,7 +239,7 @@ pub struct CodegenUnit<'tcx> {
/// contain something unique to this crate (e.g., a module path)
/// as well as the crate name and disambiguator.
name: Symbol,
- items: FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)>,
+ items: FxHashMap<MonoItem<'tcx>, MonoItemData>,
size_estimate: usize,
primary: bool,
/// True if this CGU is used to hold code coverage information for dead code,
@@ -238,6 +247,20 @@ pub struct CodegenUnit<'tcx> {
is_code_coverage_dead_code_cgu: bool,
}
+/// Auxiliary info about a `MonoItem`.
+#[derive(Copy, Clone, PartialEq, Debug, HashStable)]
+pub struct MonoItemData {
+ /// A cached copy of the result of `MonoItem::instantiation_mode`, where
+ /// `GloballyShared` maps to `false` and `LocalCopy` maps to `true`.
+ pub inlined: bool,
+
+ pub linkage: Linkage,
+ pub visibility: Visibility,
+
+ /// A cached copy of the result of `MonoItem::size_estimate`.
+ pub size_estimate: usize,
+}
+
/// Specifies the linkage type for a `MonoItem`.
///
/// See <https://llvm.org/docs/LangRef.html#linkage-types> for more details about these variants.
@@ -292,12 +315,12 @@ impl<'tcx> CodegenUnit<'tcx> {
}
/// The order of these items is non-deterministic.
- pub fn items(&self) -> &FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
+ pub fn items(&self) -> &FxHashMap<MonoItem<'tcx>, MonoItemData> {
&self.items
}
/// The order of these items is non-deterministic.
- pub fn items_mut(&mut self) -> &mut FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
+ pub fn items_mut(&mut self) -> &mut FxHashMap<MonoItem<'tcx>, MonoItemData> {
&mut self.items
}
@@ -320,16 +343,16 @@ impl<'tcx> CodegenUnit<'tcx> {
base_n::encode(hash, base_n::CASE_INSENSITIVE)
}
- pub fn compute_size_estimate(&mut self, tcx: TyCtxt<'tcx>) {
- // Estimate the size of a codegen unit as (approximately) the number of MIR
- // statements it corresponds to.
- self.size_estimate = self.items.keys().map(|mi| mi.size_estimate(tcx)).sum();
+ pub fn compute_size_estimate(&mut self) {
+ // The size of a codegen unit is the sum of the sizes of the items
+ // within it.
+ self.size_estimate = self.items.values().map(|data| data.size_estimate).sum();
}
- #[inline]
/// Should only be called if [`compute_size_estimate`] has previously been called.
///
/// [`compute_size_estimate`]: Self::compute_size_estimate
+ #[inline]
pub fn size_estimate(&self) -> usize {
// Items are never zero-sized, so if we have items the estimate must be
// non-zero, unless we forgot to call `compute_size_estimate` first.
@@ -355,7 +378,7 @@ impl<'tcx> CodegenUnit<'tcx> {
pub fn items_in_deterministic_order(
&self,
tcx: TyCtxt<'tcx>,
- ) -> Vec<(MonoItem<'tcx>, (Linkage, Visibility))> {
+ ) -> Vec<(MonoItem<'tcx>, MonoItemData)> {
// The codegen tests rely on items being processed in the same order as
// they appear in the file, so for local items, we sort by node_id first
#[derive(PartialEq, Eq, PartialOrd, Ord)]
@@ -390,7 +413,7 @@ impl<'tcx> CodegenUnit<'tcx> {
)
}
- let mut items: Vec<_> = self.items().iter().map(|(&i, &l)| (i, l)).collect();
+ let mut items: Vec<_> = self.items().iter().map(|(&i, &data)| (i, data)).collect();
items.sort_by_cached_key(|&(i, _)| item_sort_key(tcx, i));
items
}
@@ -501,27 +524,27 @@ impl<'tcx> CodegenUnitNameBuilder<'tcx> {
// local crate's ID. Otherwise there can be collisions between CGUs
// instantiating stuff for upstream crates.
let local_crate_id = if cnum != LOCAL_CRATE {
- let local_stable_crate_id = tcx.sess.local_stable_crate_id();
+ let local_stable_crate_id = tcx.stable_crate_id(LOCAL_CRATE);
format!("-in-{}.{:08x}", tcx.crate_name(LOCAL_CRATE), local_stable_crate_id)
} else {
String::new()
};
- let stable_crate_id = tcx.sess.local_stable_crate_id();
+ let stable_crate_id = tcx.stable_crate_id(LOCAL_CRATE);
format!("{}.{:08x}{}", tcx.crate_name(cnum), stable_crate_id, local_crate_id)
});
- write!(cgu_name, "{}", crate_prefix).unwrap();
+ write!(cgu_name, "{crate_prefix}").unwrap();
// Add the components
for component in components {
- write!(cgu_name, "-{}", component).unwrap();
+ write!(cgu_name, "-{component}").unwrap();
}
if let Some(special_suffix) = special_suffix {
// We add a dot in here so it cannot clash with anything in a regular
// Rust identifier
- write!(cgu_name, ".{}", special_suffix).unwrap();
+ write!(cgu_name, ".{special_suffix}").unwrap();
}
Symbol::intern(&cgu_name)
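
The mono.rs hunks above replace the `(Linkage, Visibility)` tuple stored per CGU item with a `MonoItemData` struct that additionally caches the instantiation mode and the per-item size estimate, so `compute_size_estimate` becomes a plain sum over cached values instead of recomputing MIR statement counts. A toy sketch of that shape (placeholder names, not the rustc types):

    use std::collections::HashMap;

    // Cached per-item data, mirroring the new `MonoItemData`.
    #[derive(Copy, Clone)]
    struct MonoItemData {
        inlined: bool,
        size_estimate: usize,
    }

    struct CodegenUnit {
        items: HashMap<&'static str, MonoItemData>,
        size_estimate: usize,
    }

    impl CodegenUnit {
        // Sum the cached per-item estimates; nothing is recomputed here.
        fn compute_size_estimate(&mut self) {
            self.size_estimate = self.items.values().map(|d| d.size_estimate).sum();
        }
    }

    fn main() {
        let mut cgu = CodegenUnit {
            items: HashMap::from([
                ("fn_a", MonoItemData { inlined: false, size_estimate: 12 }),
                ("drop_glue", MonoItemData { inlined: true, size_estimate: 3 }),
            ]),
            size_estimate: 0,
        };
        cgu.compute_size_estimate();
        assert_eq!(cgu.size_estimate, 15);
        assert!(!cgu.items["fn_a"].inlined);
    }
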
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index ffa7a5400..773056e8a 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -124,14 +124,14 @@ fn dump_matched_mir_node<'tcx, F>(
let def_path =
ty::print::with_forced_impl_filename_line!(tcx.def_path_str(body.source.def_id()));
// ignore-tidy-odd-backticks the literal below is fine
- write!(file, "// MIR for `{}", def_path)?;
+ write!(file, "// MIR for `{def_path}")?;
match body.source.promoted {
None => write!(file, "`")?,
- Some(promoted) => write!(file, "::{:?}`", promoted)?,
+ Some(promoted) => write!(file, "::{promoted:?}`")?,
}
- writeln!(file, " {} {}", disambiguator, pass_name)?;
+ writeln!(file, " {disambiguator} {pass_name}")?;
if let Some(ref layout) = body.generator_layout() {
- writeln!(file, "/* generator_layout = {:#?} */", layout)?;
+ writeln!(file, "/* generator_layout = {layout:#?} */")?;
}
writeln!(file)?;
extra_data(PassWhere::BeforeCFG, &mut file)?;
@@ -169,7 +169,7 @@ fn dump_file_basename<'tcx>(
) -> String {
let source = body.source;
let promotion_id = match source.promoted {
- Some(id) => format!("-{:?}", id),
+ Some(id) => format!("-{id:?}"),
None => String::new(),
};
@@ -203,8 +203,7 @@ fn dump_file_basename<'tcx>(
};
format!(
- "{}.{}{}{}{}.{}.{}",
- crate_name, item_name, shim_disambiguator, promotion_id, pass_num, pass_name, disambiguator,
+ "{crate_name}.{item_name}{shim_disambiguator}{promotion_id}{pass_num}.{pass_name}.{disambiguator}",
)
}
@@ -215,7 +214,7 @@ fn dump_path(tcx: TyCtxt<'_>, basename: &str, extension: &str) -> PathBuf {
let mut file_path = PathBuf::new();
file_path.push(Path::new(&tcx.sess.opts.unstable_opts.dump_mir_dir));
- let file_name = format!("{}.{}", basename, extension,);
+ let file_name = format!("{basename}.{extension}",);
file_path.push(&file_name);
@@ -233,12 +232,12 @@ fn create_dump_file_with_basename(
fs::create_dir_all(parent).map_err(|e| {
io::Error::new(
e.kind(),
- format!("IO error creating MIR dump directory: {:?}; {}", parent, e),
+ format!("IO error creating MIR dump directory: {parent:?}; {e}"),
)
})?;
}
Ok(io::BufWriter::new(fs::File::create(&file_path).map_err(|e| {
- io::Error::new(e.kind(), format!("IO error creating MIR dump file: {:?}; {}", file_path, e))
+ io::Error::new(e.kind(), format!("IO error creating MIR dump file: {file_path:?}; {e}"))
})?))
}
@@ -346,28 +345,24 @@ where
// Basic block label at the top.
let cleanup_text = if data.is_cleanup { " (cleanup)" } else { "" };
- writeln!(w, "{}{:?}{}: {{", INDENT, block, cleanup_text)?;
+ writeln!(w, "{INDENT}{block:?}{cleanup_text}: {{")?;
// List of statements in the middle.
let mut current_location = Location { block, statement_index: 0 };
for statement in &data.statements {
extra_data(PassWhere::BeforeLocation(current_location), w)?;
- let indented_body = format!("{0}{0}{1:?};", INDENT, statement);
+ let indented_body = format!("{INDENT}{INDENT}{statement:?};");
if tcx.sess.opts.unstable_opts.mir_include_spans {
writeln!(
w,
"{:A$} // {}{}",
indented_body,
- if tcx.sess.verbose() {
- format!("{:?}: ", current_location)
- } else {
- String::new()
- },
+ if tcx.sess.verbose() { format!("{current_location:?}: ") } else { String::new() },
comment(tcx, statement.source_info),
A = ALIGN,
)?;
} else {
- writeln!(w, "{}", indented_body)?;
+ writeln!(w, "{indented_body}")?;
}
write_extra(tcx, w, |visitor| {
@@ -387,12 +382,12 @@ where
w,
"{:A$} // {}{}",
indented_terminator,
- if tcx.sess.verbose() { format!("{:?}: ", current_location) } else { String::new() },
+ if tcx.sess.verbose() { format!("{current_location:?}: ") } else { String::new() },
comment(tcx, data.terminator().source_info),
A = ALIGN,
)?;
} else {
- writeln!(w, "{}", indented_terminator)?;
+ writeln!(w, "{indented_terminator}")?;
}
write_extra(tcx, w, |visitor| {
@@ -402,7 +397,7 @@ where
extra_data(PassWhere::AfterLocation(current_location), w)?;
extra_data(PassWhere::AfterTerminator(block), w)?;
- writeln!(w, "{}}}", INDENT)
+ writeln!(w, "{INDENT}}}")
}
/// After we print the main statement, we sometimes dump extra
@@ -457,27 +452,27 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
self.tcx.sess.source_map().span_to_embeddable_string(*span)
));
if let Some(user_ty) = user_ty {
- self.push(&format!("+ user_ty: {:?}", user_ty));
+ self.push(&format!("+ user_ty: {user_ty:?}"));
}
// FIXME: this is a poor version of `pretty_print_const_value`.
let fmt_val = |val: &ConstValue<'tcx>| match val {
ConstValue::ZeroSized => "<ZST>".to_string(),
- ConstValue::Scalar(s) => format!("Scalar({:?})", s),
+ ConstValue::Scalar(s) => format!("Scalar({s:?})"),
ConstValue::Slice { .. } => "Slice(..)".to_string(),
ConstValue::ByRef { .. } => "ByRef(..)".to_string(),
};
let fmt_valtree = |valtree: &ty::ValTree<'tcx>| match valtree {
- ty::ValTree::Leaf(leaf) => format!("ValTree::Leaf({:?})", leaf),
+ ty::ValTree::Leaf(leaf) => format!("ValTree::Leaf({leaf:?})"),
ty::ValTree::Branch(_) => "ValTree::Branch(..)".to_string(),
};
let val = match literal {
ConstantKind::Ty(ct) => match ct.kind() {
- ty::ConstKind::Param(p) => format!("Param({})", p),
+ ty::ConstKind::Param(p) => format!("Param({p})"),
ty::ConstKind::Unevaluated(uv) => {
- format!("Unevaluated({}, {:?})", self.tcx.def_path_str(uv.def), uv.substs,)
+ format!("Unevaluated({}, {:?})", self.tcx.def_path_str(uv.def), uv.args,)
}
ty::ConstKind::Value(val) => format!("Value({})", fmt_valtree(&val)),
ty::ConstKind::Error(_) => "Error".to_string(),
@@ -491,7 +486,7 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
format!(
"Unevaluated({}, {:?}, {:?})",
self.tcx.def_path_str(uv.def),
- uv.substs,
+ uv.args,
uv.promoted,
)
}
@@ -512,22 +507,22 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
self.super_rvalue(rvalue, location);
if let Rvalue::Aggregate(kind, _) = rvalue {
match **kind {
- AggregateKind::Closure(def_id, substs) => {
+ AggregateKind::Closure(def_id, args) => {
self.push("closure");
- self.push(&format!("+ def_id: {:?}", def_id));
- self.push(&format!("+ substs: {:#?}", substs));
+ self.push(&format!("+ def_id: {def_id:?}"));
+ self.push(&format!("+ args: {args:#?}"));
}
- AggregateKind::Generator(def_id, substs, movability) => {
+ AggregateKind::Generator(def_id, args, movability) => {
self.push("generator");
- self.push(&format!("+ def_id: {:?}", def_id));
- self.push(&format!("+ substs: {:#?}", substs));
- self.push(&format!("+ movability: {:?}", movability));
+ self.push(&format!("+ def_id: {def_id:?}"));
+ self.push(&format!("+ args: {args:#?}"));
+ self.push(&format!("+ movability: {movability:?}"));
}
AggregateKind::Adt(_, _, _, Some(user_ty), _) => {
self.push("adt");
- self.push(&format!("+ user_ty: {:?}", user_ty));
+ self.push(&format!("+ user_ty: {user_ty:?}"));
}
_ => {}
@@ -560,13 +555,8 @@ fn write_scope_tree(
}
let indented_debug_info = format!(
- "{0:1$}debug {2} => {3:&<4$}{5:?};",
- INDENT,
- indent,
- var_debug_info.name,
- "",
- var_debug_info.references as usize,
- var_debug_info.value,
+ "{0:1$}debug {2} => {3:?};",
+ INDENT, indent, var_debug_info.name, var_debug_info.value,
);
if tcx.sess.opts.unstable_opts.mir_include_spans {
@@ -578,7 +568,7 @@ fn write_scope_tree(
comment(tcx, var_debug_info.source_info),
)?;
} else {
- writeln!(w, "{}", indented_debug_info)?;
+ writeln!(w, "{indented_debug_info}")?;
}
}
@@ -600,7 +590,7 @@ fn write_scope_tree(
format!("{0:1$}let {2}{3:?}: {4:?}", INDENT, indent, mut_str, local, local_decl.ty);
if let Some(user_ty) = &local_decl.user_ty {
for user_ty in user_ty.projections() {
- write!(indented_decl, " as {:?}", user_ty).unwrap();
+ write!(indented_decl, " as {user_ty:?}").unwrap();
}
}
indented_decl.push(';');
@@ -617,7 +607,7 @@ fn write_scope_tree(
comment(tcx, local_decl.source_info),
)?;
} else {
- writeln!(w, "{}", indented_decl,)?;
+ writeln!(w, "{indented_decl}",)?;
}
}
@@ -654,10 +644,10 @@ fn write_scope_tree(
tcx.sess.source_map().span_to_embeddable_string(span),
)?;
} else {
- writeln!(w, "{}", indented_header)?;
+ writeln!(w, "{indented_header}")?;
}
} else {
- writeln!(w, "{}", indented_header)?;
+ writeln!(w, "{indented_header}")?;
}
write_scope_tree(tcx, body, scope_tree, w, child, depth + 1)?;
@@ -844,7 +834,7 @@ fn write_allocation_endline(w: &mut dyn std::fmt::Write, ascii: &str) -> std::fm
for _ in 0..(BYTES_PER_LINE - ascii.chars().count()) {
write!(w, " ")?;
}
- writeln!(w, " │ {}", ascii)
+ writeln!(w, " │ {ascii}")
}
/// Number of bytes to print per allocation hex dump line.
@@ -880,7 +870,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
if num_lines > 0 {
write!(w, "{}0x{:02$x} │ ", prefix, 0, pos_width)?;
} else {
- write!(w, "{}", prefix)?;
+ write!(w, "{prefix}")?;
}
let mut i = Size::ZERO;
@@ -913,10 +903,10 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
let offset = Size::from_bytes(offset);
let provenance_width = |bytes| bytes * 3;
let ptr = Pointer::new(prov, offset);
- let mut target = format!("{:?}", ptr);
+ let mut target = format!("{ptr:?}");
if target.len() > provenance_width(ptr_size.bytes_usize() - 1) {
// This is too long, try to save some space.
- target = format!("{:#?}", ptr);
+ target = format!("{ptr:#?}");
}
if ((i - line_start) + ptr_size).bytes_usize() > BYTES_PER_LINE {
// This branch handles the situation where a provenance starts in the current line
@@ -935,10 +925,10 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
line_start =
write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
ascii.clear();
- write!(w, "{0:─^1$}╼", target, overflow_width)?;
+ write!(w, "{target:─^overflow_width$}╼")?;
} else {
oversized_ptr(&mut target, remainder_width);
- write!(w, "╾{0:─^1$}", target, remainder_width)?;
+ write!(w, "╾{target:─^remainder_width$}")?;
line_start =
write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
write!(w, "{0:─^1$}╼", "", overflow_width)?;
@@ -955,7 +945,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
let provenance_width = provenance_width(ptr_size.bytes_usize() - 1);
oversized_ptr(&mut target, provenance_width);
ascii.push('╾');
- write!(w, "╾{0:─^1$}╼", target, provenance_width)?;
+ write!(w, "╾{target:─^provenance_width$}╼")?;
for _ in 0..ptr_size.bytes() - 2 {
ascii.push('─');
}
@@ -972,7 +962,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
// Format is similar to "oversized" above.
let j = i.bytes_usize();
let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
- write!(w, "╾{:02x}{:#?} (1 ptr byte)╼", c, prov)?;
+ write!(w, "╾{c:02x}{prov:#?} (1 ptr byte)╼")?;
i += Size::from_bytes(1);
} else if alloc
.init_mask()
@@ -984,7 +974,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
// Checked definedness (and thus range) and provenance. This access also doesn't
// influence interpreter execution but is only for debugging.
let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
- write!(w, "{:02x}", c)?;
+ write!(w, "{c:02x}")?;
if c.is_ascii_control() || c >= 0x80 {
ascii.push('.');
} else {
@@ -1018,7 +1008,7 @@ fn write_mir_sig(tcx: TyCtxt<'_>, body: &Body<'_>, w: &mut dyn Write) -> io::Res
_ => tcx.is_closure(def_id),
};
match (kind, body.source.promoted) {
- (_, Some(i)) => write!(w, "{:?} in ", i)?,
+ (_, Some(i)) => write!(w, "{i:?} in ")?,
(DefKind::Const | DefKind::AssocConst, _) => write!(w, "const ")?,
(DefKind::Static(hir::Mutability::Not), _) => write!(w, "static ")?,
(DefKind::Static(hir::Mutability::Mut), _) => write!(w, "static mut ")?,
@@ -1051,7 +1041,7 @@ fn write_mir_sig(tcx: TyCtxt<'_>, body: &Body<'_>, w: &mut dyn Write) -> io::Res
if let Some(yield_ty) = body.yield_ty() {
writeln!(w)?;
- writeln!(w, "yields {}", yield_ty)?;
+ writeln!(w, "yields {yield_ty}")?;
}
write!(w, " ")?;
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
index 613b132ff..71bec49af 100644
--- a/compiler/rustc_middle/src/mir/query.rs
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -194,11 +194,11 @@ impl Debug for GeneratorLayout<'_> {
}
impl Debug for GenVariantPrinter {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- let variant_name = ty::GeneratorSubsts::variant_name(self.0);
+ let variant_name = ty::GeneratorArgs::variant_name(self.0);
if fmt.alternate() {
write!(fmt, "{:9}({:?})", variant_name, self.0)
} else {
- write!(fmt, "{}", variant_name)
+ write!(fmt, "{variant_name}")
}
}
}
@@ -265,10 +265,10 @@ pub struct ConstQualifs {
/// `UniversalRegions::closure_mapping`.) Note the free regions in the
/// closure's signature and captures are erased.
///
-/// Example: If type check produces a closure with the closure substs:
+/// Example: If type check produces a closure with the closure args:
///
/// ```text
-/// ClosureSubsts = [
+/// ClosureArgs = [
/// 'a, // From the parent.
/// 'b,
/// i8, // the "closure kind"
@@ -280,7 +280,7 @@ pub struct ConstQualifs {
/// We would "renumber" each free region to a unique vid, as follows:
///
/// ```text
-/// ClosureSubsts = [
+/// ClosureArgs = [
/// '1, // From the parent.
/// '2,
/// i8, // the "closure kind"
diff --git a/compiler/rustc_middle/src/mir/spanview.rs b/compiler/rustc_middle/src/mir/spanview.rs
index 730c55157..20a9e6889 100644
--- a/compiler/rustc_middle/src/mir/spanview.rs
+++ b/compiler/rustc_middle/src/mir/spanview.rs
@@ -159,10 +159,10 @@ where
indent_to_initial_start_col,
source_map.span_to_snippet(spanview_span).expect("function should have printable source")
);
- writeln!(w, "{}", HEADER)?;
- writeln!(w, "<title>{}</title>", title)?;
- writeln!(w, "{}", STYLE_SECTION)?;
- writeln!(w, "{}", START_BODY)?;
+ writeln!(w, "{HEADER}")?;
+ writeln!(w, "<title>{title}</title>")?;
+ writeln!(w, "{STYLE_SECTION}")?;
+ writeln!(w, "{START_BODY}")?;
write!(
w,
r#"<div class="code" style="counter-reset: line {}"><span class="line">{}"#,
@@ -226,7 +226,7 @@ where
write_coverage_gap(tcx, from_pos, end_pos, w)?;
}
writeln!(w, r#"</span></div>"#)?;
- writeln!(w, "{}", FOOTER)?;
+ writeln!(w, "{FOOTER}")?;
Ok(())
}
@@ -561,17 +561,16 @@ where
}
for (i, line) in html_snippet.lines().enumerate() {
if i > 0 {
- write!(w, "{}", NEW_LINE_SPAN)?;
+ write!(w, "{NEW_LINE_SPAN}")?;
}
write!(
w,
- r#"<span class="code{}" style="--layer: {}"{}>{}</span>"#,
- maybe_alt_class, layer, maybe_title_attr, line
+ r#"<span class="code{maybe_alt_class}" style="--layer: {layer}"{maybe_title_attr}>{line}</span>"#
)?;
}
// Check for and translate trailing newlines, because `str::lines()` ignores them
if html_snippet.ends_with('\n') {
- write!(w, "{}", NEW_LINE_SPAN)?;
+ write!(w, "{NEW_LINE_SPAN}")?;
}
if layer == 1 {
write!(w, "</span>")?;
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
index 7f1d38203..be27bf75d 100644
--- a/compiler/rustc_middle/src/mir/syntax.rs
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -8,7 +8,7 @@ use super::{BasicBlock, Constant, Local, SwitchTargets, UserTypeProjection};
use crate::mir::coverage::{CodeRegion, CoverageKind};
use crate::traits::Reveal;
use crate::ty::adjustment::PointerCoercion;
-use crate::ty::subst::SubstsRef;
+use crate::ty::GenericArgsRef;
use crate::ty::{self, List, Ty};
use crate::ty::{Region, UserTypeAnnotationIndex};
@@ -1050,10 +1050,6 @@ pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>;
/// there may be other effects: if the type has a validity constraint loading the place might be UB
/// if the validity constraint is not met.
///
-/// **Needs clarification:** Ralf proposes that loading a place not have side-effects.
-/// This is what is implemented in miri today. Are these the semantics we want for MIR? Is this
-/// something we can even decide without knowing more about Rust's memory model?
-///
/// **Needs clarification:** Is loading a place that has its variant index set well-formed? Miri
/// currently implements it, but it seems like this may be something to check against in the
/// validator.
@@ -1071,6 +1067,16 @@ pub enum Operand<'tcx> {
/// in [UCG#188]. You should not emit MIR that may attempt a subsequent second load of this
/// place without first re-initializing it.
///
+ /// **Needs clarification:** The operational impact of `Move` is unclear. Currently (both in
+ /// Miri and codegen) it has no effect at all unless it appears in an argument to `Call`; for
+ /// `Call` it allows the argument to be passed to the callee "in-place", i.e. the callee might
+ /// just get a reference to this place instead of a full copy. Miri implements this with a
+ /// combination of aliasing model "protectors" and putting `uninit` into the place. Ralf
+ /// proposes that we don't want these semantics for `Move` in regular assignments, because
+ /// loading a place should not have side-effects, and the aliasing model "protectors" are
+ /// inherently tied to a function call. Are these the semantics we want for MIR? Is this
+ /// something we can even decide without knowing more about Rust's memory model?
+ ///
/// [UCG#188]: https://github.com/rust-lang/unsafe-code-guidelines/issues/188
Move(Place<'tcx>),
@@ -1262,10 +1268,10 @@ pub enum AggregateKind<'tcx> {
/// active field number and is present only for union expressions
/// -- e.g., for a union expression `SomeUnion { c: .. }`, the
/// active field index would identity the field `c`
- Adt(DefId, VariantIdx, SubstsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<FieldIdx>),
+ Adt(DefId, VariantIdx, GenericArgsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<FieldIdx>),
- Closure(DefId, SubstsRef<'tcx>),
- Generator(DefId, SubstsRef<'tcx>, hir::Movability),
+ Closure(DefId, GenericArgsRef<'tcx>),
+ Generator(DefId, GenericArgsRef<'tcx>, hir::Movability),
}
#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
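
The syntax.rs hunk above expands the `Operand::Move` note: today `Move` only has an operational effect as a `Call` argument, where the callee may take the value in place. The snippet below is only a surface-Rust analogue of that case; the corresponding MIR with its `move` operands is produced by the compiler, not by this code:

    fn consume(v: Vec<u8>) -> usize {
        v.len()
    }

    fn main() {
        let v = vec![1, 2, 3];
        // `v` is passed by value and never used again, which is the situation the
        // note describes: the MIR call argument can be a `Move` of `v`'s local.
        let n = consume(v);
        println!("{n}");
    }
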
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
index 8618a5315..f79697936 100644
--- a/compiler/rustc_middle/src/mir/tcx.rs
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -35,7 +35,7 @@ impl<'tcx> PlaceTy<'tcx> {
#[instrument(level = "debug", skip(tcx), ret)]
pub fn field_ty(self, tcx: TyCtxt<'tcx>, f: FieldIdx) -> Ty<'tcx> {
match self.ty.kind() {
- ty::Adt(adt_def, substs) => {
+ ty::Adt(adt_def, args) => {
let variant_def = match self.variant_index {
None => adt_def.non_enum_variant(),
Some(variant_index) => {
@@ -44,7 +44,7 @@ impl<'tcx> PlaceTy<'tcx> {
}
};
let field_def = &variant_def.fields[f];
- field_def.ty(tcx, substs)
+ field_def.ty(tcx, args)
}
ty::Tuple(tys) => tys[f.index()],
_ => bug!("extracting field of non-tuple non-adt: {:?}", self),
@@ -198,10 +198,10 @@ impl<'tcx> Rvalue<'tcx> {
AggregateKind::Tuple => {
Ty::new_tup_from_iter(tcx, ops.iter().map(|op| op.ty(local_decls, tcx)))
}
- AggregateKind::Adt(did, _, substs, _, _) => tcx.type_of(did).subst(tcx, substs),
- AggregateKind::Closure(did, substs) => Ty::new_closure(tcx, did, substs),
- AggregateKind::Generator(did, substs, movability) => {
- Ty::new_generator(tcx, did, substs, movability)
+ AggregateKind::Adt(did, _, args, _, _) => tcx.type_of(did).instantiate(tcx, args),
+ AggregateKind::Closure(did, args) => Ty::new_closure(tcx, did, args),
+ AggregateKind::Generator(did, args, movability) => {
+ Ty::new_generator(tcx, did, args, movability)
}
},
Rvalue::ShallowInitBox(_, ty) => Ty::new_box(tcx, ty),
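
In the tcx.rs hunks, `field_def.ty(tcx, args)` and `tcx.type_of(did).instantiate(tcx, args)` both mean: take a declared type that may mention the ADT's generic parameters and substitute the supplied `GenericArgs` into it. A toy model of that substitution (not the rustc API), for a hypothetical `struct Foo<T>(T, u32)` instantiated with `[i64]`:

    #[derive(Clone, Debug, PartialEq)]
    enum Ty {
        Param(usize), // the i-th generic parameter
        U32,
        I64,
    }

    // Substitute generic arguments into a declared field type.
    fn instantiate(declared: &Ty, args: &[Ty]) -> Ty {
        match declared {
            Ty::Param(i) => args[*i].clone(),
            other => other.clone(),
        }
    }

    fn main() {
        // struct Foo<T>(T, u32); instantiated with args = [i64]
        let field_tys = [Ty::Param(0), Ty::U32];
        let args = [Ty::I64];
        assert_eq!(instantiate(&field_tys[0], &args), Ty::I64);
        assert_eq!(instantiate(&field_tys[1], &args), Ty::U32);
    }
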
diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs
index 1b9c1438f..1f878d23b 100644
--- a/compiler/rustc_middle/src/mir/terminator.rs
+++ b/compiler/rustc_middle/src/mir/terminator.rs
@@ -10,6 +10,7 @@ use std::iter;
use std::slice;
pub use super::query::*;
+use super::*;
#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
pub struct SwitchTargets {
@@ -280,7 +281,7 @@ impl<'tcx> Debug for TerminatorKind<'tcx> {
match (successor_count, unwind) {
(0, None) => Ok(()),
- (0, Some(unwind)) => write!(fmt, " -> {}", unwind),
+ (0, Some(unwind)) => write!(fmt, " -> {unwind}"),
(1, None) => write!(fmt, " -> {:?}", self.successors().next().unwrap()),
_ => {
write!(fmt, " -> [")?;
@@ -307,22 +308,22 @@ impl<'tcx> TerminatorKind<'tcx> {
use self::TerminatorKind::*;
match self {
Goto { .. } => write!(fmt, "goto"),
- SwitchInt { discr, .. } => write!(fmt, "switchInt({:?})", discr),
+ SwitchInt { discr, .. } => write!(fmt, "switchInt({discr:?})"),
Return => write!(fmt, "return"),
GeneratorDrop => write!(fmt, "generator_drop"),
Resume => write!(fmt, "resume"),
Terminate => write!(fmt, "abort"),
- Yield { value, resume_arg, .. } => write!(fmt, "{:?} = yield({:?})", resume_arg, value),
+ Yield { value, resume_arg, .. } => write!(fmt, "{resume_arg:?} = yield({value:?})"),
Unreachable => write!(fmt, "unreachable"),
- Drop { place, .. } => write!(fmt, "drop({:?})", place),
+ Drop { place, .. } => write!(fmt, "drop({place:?})"),
Call { func, args, destination, .. } => {
- write!(fmt, "{:?} = ", destination)?;
- write!(fmt, "{:?}(", func)?;
+ write!(fmt, "{destination:?} = ")?;
+ write!(fmt, "{func:?}(")?;
for (index, arg) in args.iter().enumerate() {
if index > 0 {
write!(fmt, ", ")?;
}
- write!(fmt, "{:?}", arg)?;
+ write!(fmt, "{arg:?}")?;
}
write!(fmt, ")")
}
@@ -331,7 +332,7 @@ impl<'tcx> TerminatorKind<'tcx> {
if !expected {
write!(fmt, "!")?;
}
- write!(fmt, "{:?}, ", cond)?;
+ write!(fmt, "{cond:?}, ")?;
msg.fmt_assert_args(fmt)?;
write!(fmt, ")")
}
@@ -344,7 +345,7 @@ impl<'tcx> TerminatorKind<'tcx> {
let print_late = |&late| if late { "late" } else { "" };
match op {
InlineAsmOperand::In { reg, value } => {
- write!(fmt, "in({}) {:?}", reg, value)?;
+ write!(fmt, "in({reg}) {value:?}")?;
}
InlineAsmOperand::Out { reg, late, place: Some(place) } => {
write!(fmt, "{}out({}) {:?}", print_late(late), reg, place)?;
@@ -371,17 +372,17 @@ impl<'tcx> TerminatorKind<'tcx> {
write!(fmt, "in{}out({}) {:?} => _", print_late(late), reg, in_value)?;
}
InlineAsmOperand::Const { value } => {
- write!(fmt, "const {:?}", value)?;
+ write!(fmt, "const {value:?}")?;
}
InlineAsmOperand::SymFn { value } => {
- write!(fmt, "sym_fn {:?}", value)?;
+ write!(fmt, "sym_fn {value:?}")?;
}
InlineAsmOperand::SymStatic { def_id } => {
- write!(fmt, "sym_static {:?}", def_id)?;
+ write!(fmt, "sym_static {def_id:?}")?;
}
}
}
- write!(fmt, ", options({:?}))", options)
+ write!(fmt, ", options({options:?}))")
}
}
}
@@ -430,3 +431,108 @@ impl<'tcx> TerminatorKind<'tcx> {
}
}
}
+
+#[derive(Copy, Clone, Debug)]
+pub enum TerminatorEdges<'mir, 'tcx> {
+ /// For terminators that have no successor, like `return`.
+ None,
+ /// For terminators that have a single successor, like `goto`, and `assert` without cleanup block.
+ Single(BasicBlock),
+ /// For terminators that have two successors, like `assert` with cleanup block and `falseEdge`.
+ Double(BasicBlock, BasicBlock),
+ /// Special action for `Yield`, `Call` and `InlineAsm` terminators.
+ AssignOnReturn {
+ return_: Option<BasicBlock>,
+ unwind: UnwindAction,
+ place: CallReturnPlaces<'mir, 'tcx>,
+ },
+ /// Special edge for `SwitchInt`.
+ SwitchInt { targets: &'mir SwitchTargets, discr: &'mir Operand<'tcx> },
+}
+
+/// List of places that are written to after a successful (non-unwind) return
+/// from a `Call`, `Yield` or `InlineAsm`.
+#[derive(Copy, Clone, Debug)]
+pub enum CallReturnPlaces<'a, 'tcx> {
+ Call(Place<'tcx>),
+ Yield(Place<'tcx>),
+ InlineAsm(&'a [InlineAsmOperand<'tcx>]),
+}
+
+impl<'tcx> CallReturnPlaces<'_, 'tcx> {
+ pub fn for_each(&self, mut f: impl FnMut(Place<'tcx>)) {
+ match *self {
+ Self::Call(place) | Self::Yield(place) => f(place),
+ Self::InlineAsm(operands) => {
+ for op in operands {
+ match *op {
+ InlineAsmOperand::Out { place: Some(place), .. }
+ | InlineAsmOperand::InOut { out_place: Some(place), .. } => f(place),
+ _ => {}
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> Terminator<'tcx> {
+ pub fn edges(&self) -> TerminatorEdges<'_, 'tcx> {
+ self.kind.edges()
+ }
+}
+
+impl<'tcx> TerminatorKind<'tcx> {
+ pub fn edges(&self) -> TerminatorEdges<'_, 'tcx> {
+ use TerminatorKind::*;
+ match *self {
+ Return | Resume | Terminate | GeneratorDrop | Unreachable => TerminatorEdges::None,
+
+ Goto { target } => TerminatorEdges::Single(target),
+
+ Assert { target, unwind, expected: _, msg: _, cond: _ }
+ | Drop { target, unwind, place: _, replace: _ }
+ | FalseUnwind { real_target: target, unwind } => match unwind {
+ UnwindAction::Cleanup(unwind) => TerminatorEdges::Double(target, unwind),
+ UnwindAction::Continue | UnwindAction::Terminate | UnwindAction::Unreachable => {
+ TerminatorEdges::Single(target)
+ }
+ },
+
+ FalseEdge { real_target, imaginary_target } => {
+ TerminatorEdges::Double(real_target, imaginary_target)
+ }
+
+ Yield { resume: target, drop, resume_arg, value: _ } => {
+ TerminatorEdges::AssignOnReturn {
+ return_: Some(target),
+ unwind: drop.map_or(UnwindAction::Terminate, UnwindAction::Cleanup),
+ place: CallReturnPlaces::Yield(resume_arg),
+ }
+ }
+
+ Call { unwind, destination, target, func: _, args: _, fn_span: _, call_source: _ } => {
+ TerminatorEdges::AssignOnReturn {
+ return_: target,
+ unwind,
+ place: CallReturnPlaces::Call(destination),
+ }
+ }
+
+ InlineAsm {
+ template: _,
+ ref operands,
+ options: _,
+ line_spans: _,
+ destination,
+ unwind,
+ } => TerminatorEdges::AssignOnReturn {
+ return_: destination,
+ unwind,
+ place: CallReturnPlaces::InlineAsm(operands),
+ },
+
+ SwitchInt { ref targets, ref discr } => TerminatorEdges::SwitchInt { targets, discr },
+ }
+ }
+}
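
The newly added `TerminatorKind::edges` above gives consumers, chiefly dataflow analyses, a uniform view of a terminator's successors plus the place assigned on a successful return, instead of matching on every `TerminatorKind`. A simplified standalone analogue (the real `AssignOnReturn` carries an `UnwindAction` and a `CallReturnPlaces`, which are reduced to an optional cleanup block here):

    #[derive(Clone, Copy)]
    struct BasicBlock(u32);

    enum TerminatorEdges<'a> {
        None,
        Single(BasicBlock),
        Double(BasicBlock, BasicBlock),
        AssignOnReturn { return_: Option<BasicBlock>, cleanup: Option<BasicBlock> },
        SwitchInt { targets: &'a [BasicBlock] },
    }

    // One place to enumerate successors, whatever the terminator was.
    fn successors(edges: &TerminatorEdges<'_>) -> Vec<BasicBlock> {
        match edges {
            TerminatorEdges::None => Vec::new(),
            TerminatorEdges::Single(b) => vec![*b],
            TerminatorEdges::Double(a, b) => vec![*a, *b],
            TerminatorEdges::AssignOnReturn { return_, cleanup } => {
                return_.iter().chain(cleanup.iter()).copied().collect()
            }
            TerminatorEdges::SwitchInt { targets } => targets.to_vec(),
        }
    }

    fn main() {
        let blocks = [BasicBlock(3), BasicBlock(7)];
        let edges = [
            TerminatorEdges::None,
            TerminatorEdges::Single(BasicBlock(1)),
            TerminatorEdges::Double(BasicBlock(1), BasicBlock(2)),
            TerminatorEdges::AssignOnReturn {
                return_: Some(BasicBlock(1)),
                cleanup: Some(BasicBlock(2)),
            },
            TerminatorEdges::SwitchInt { targets: &blocks },
        ];
        for e in &edges {
            let succ: Vec<u32> = successors(e).iter().map(|b| b.0).collect();
            println!("{succ:?}");
        }
    }
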
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index 205dc9ec7..069b38591 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -63,7 +63,7 @@
//! `is_cleanup` above.
use crate::mir::*;
-use crate::ty::subst::SubstsRef;
+use crate::ty::GenericArgsRef;
use crate::ty::{self, CanonicalUserTypeAnnotation, Ty};
use rustc_span::Span;
@@ -245,12 +245,12 @@ macro_rules! make_mir_visitor {
self.super_region(region);
}
- fn visit_substs(
+ fn visit_args(
&mut self,
- substs: & $($mutability)? SubstsRef<'tcx>,
+ args: & $($mutability)? GenericArgsRef<'tcx>,
_: Location,
) {
- self.super_substs(substs);
+ self.super_args(args);
}
fn visit_local_decl(
@@ -335,7 +335,7 @@ macro_rules! make_mir_visitor {
self.visit_span($(& $mutability)? *callsite_span);
- let ty::Instance { def: callee_def, substs: callee_substs } = callee;
+ let ty::Instance { def: callee_def, args: callee_args } = callee;
match callee_def {
ty::InstanceDef::Item(_def_id) => {}
@@ -355,7 +355,7 @@ macro_rules! make_mir_visitor {
self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
}
}
- self.visit_substs(callee_substs, location);
+ self.visit_args(callee_args, location);
}
if let Some(inlined_parent_scope) = inlined_parent_scope {
self.visit_source_scope($(& $mutability)? *inlined_parent_scope);
@@ -721,24 +721,24 @@ macro_rules! make_mir_visitor {
AggregateKind::Adt(
_adt_def,
_variant_index,
- substs,
- _user_substs,
+ args,
+ _user_args,
_active_field_index
) => {
- self.visit_substs(substs, location);
+ self.visit_args(args, location);
}
AggregateKind::Closure(
_,
- closure_substs
+ closure_args
) => {
- self.visit_substs(closure_substs, location);
+ self.visit_args(closure_args, location);
}
AggregateKind::Generator(
_,
- generator_substs,
+ generator_args,
_movability,
) => {
- self.visit_substs(generator_substs, location);
+ self.visit_args(generator_args, location);
}
}
@@ -840,7 +840,6 @@ macro_rules! make_mir_visitor {
source_info,
value,
argument_index: _,
- references: _,
} = var_debug_info;
self.visit_source_info(source_info);
@@ -933,7 +932,7 @@ macro_rules! make_mir_visitor {
fn super_region(&mut self, _region: $(& $mutability)? ty::Region<'tcx>) {
}
- fn super_substs(&mut self, _substs: & $($mutability)? SubstsRef<'tcx>) {
+ fn super_args(&mut self, _args: & $($mutability)? GenericArgsRef<'tcx>) {
}
// Convenience methods
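
The visit.rs hunks rename the visitor hooks from `visit_substs`/`super_substs` to `visit_args`/`super_args` but keep the usual rustc visitor split: `visit_*` is the overridable entry point and `super_*` performs the default traversal, so an override normally finishes by calling `super_*`. A toy illustration of that pattern with placeholder types:

    // Placeholder for `GenericArgsRef<'tcx>`.
    type GenericArgsRef = Vec<&'static str>;

    trait Visitor {
        // Overridable hook; the default just walks the structure.
        fn visit_args(&mut self, args: &GenericArgsRef) {
            self.super_args(args);
        }
        // Default traversal (a leaf here, so it does nothing).
        fn super_args(&mut self, _args: &GenericArgsRef) {}
    }

    struct CountArgs {
        count: usize,
    }

    impl Visitor for CountArgs {
        fn visit_args(&mut self, args: &GenericArgsRef) {
            self.count += args.len();
            self.super_args(args); // keep the default traversal
        }
    }

    fn main() {
        let mut v = CountArgs { count: 0 };
        let args: GenericArgsRef = vec!["'a", "T"];
        v.visit_args(&args);
        println!("{}", v.count);
    }
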
diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs
index 2c481745d..348f79ed6 100644
--- a/compiler/rustc_middle/src/query/erase.rs
+++ b/compiler/rustc_middle/src/query/erase.rs
@@ -235,6 +235,7 @@ trivial! {
rustc_hir::def_id::DefId,
rustc_hir::def_id::DefIndex,
rustc_hir::def_id::LocalDefId,
+ rustc_hir::def_id::LocalModDefId,
rustc_hir::def::DefKind,
rustc_hir::Defaultness,
rustc_hir::definitions::DefKey,
diff --git a/compiler/rustc_middle/src/query/keys.rs b/compiler/rustc_middle/src/query/keys.rs
index 28e699cd2..01bdc4c99 100644
--- a/compiler/rustc_middle/src/query/keys.rs
+++ b/compiler/rustc_middle/src/query/keys.rs
@@ -6,9 +6,9 @@ use crate::mir::interpret::ConstValue;
use crate::traits;
use crate::ty::fast_reject::SimplifiedType;
use crate::ty::layout::{TyAndLayout, ValidityRequirement};
-use crate::ty::subst::{GenericArg, SubstsRef};
use crate::ty::{self, Ty, TyCtxt};
-use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
+use crate::ty::{GenericArg, GenericArgsRef};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LocalModDefId, ModDefId, LOCAL_CRATE};
use rustc_hir::hir_id::{HirId, OwnerId};
use rustc_query_system::query::{DefaultCacheSelector, SingleCacheSelector, VecCacheSelector};
use rustc_span::symbol::{Ident, Symbol};
@@ -175,6 +175,41 @@ impl AsLocalKey for DefId {
}
}
+impl Key for LocalModDefId {
+ type CacheSelector = DefaultCacheSelector<Self>;
+
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(*self)
+ }
+
+ #[inline(always)]
+ fn key_as_def_id(&self) -> Option<DefId> {
+ Some(self.to_def_id())
+ }
+}
+
+impl Key for ModDefId {
+ type CacheSelector = DefaultCacheSelector<Self>;
+
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(*self)
+ }
+
+ #[inline(always)]
+ fn key_as_def_id(&self) -> Option<DefId> {
+ Some(self.to_def_id())
+ }
+}
+
+impl AsLocalKey for ModDefId {
+ type LocalKey = LocalModDefId;
+
+ #[inline(always)]
+ fn as_local_key(&self) -> Option<Self::LocalKey> {
+ self.as_local()
+ }
+}
+
impl Key for SimplifiedType {
type CacheSelector = DefaultCacheSelector<Self>;
@@ -286,7 +321,7 @@ impl Key for (DefId, SimplifiedType) {
}
}
-impl<'tcx> Key for SubstsRef<'tcx> {
+impl<'tcx> Key for GenericArgsRef<'tcx> {
type CacheSelector = DefaultCacheSelector<Self>;
fn default_span(&self, _: TyCtxt<'_>) -> Span {
@@ -294,7 +329,7 @@ impl<'tcx> Key for SubstsRef<'tcx> {
}
}
-impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
+impl<'tcx> Key for (DefId, GenericArgsRef<'tcx>) {
type CacheSelector = DefaultCacheSelector<Self>;
fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
@@ -310,7 +345,7 @@ impl<'tcx> Key for (ty::UnevaluatedConst<'tcx>, ty::UnevaluatedConst<'tcx>) {
}
}
-impl<'tcx> Key for (LocalDefId, DefId, SubstsRef<'tcx>) {
+impl<'tcx> Key for (LocalDefId, DefId, GenericArgsRef<'tcx>) {
type CacheSelector = DefaultCacheSelector<Self>;
fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
@@ -487,7 +522,7 @@ impl Key for (Symbol, u32, u32) {
}
}
-impl<'tcx> Key for (DefId, Ty<'tcx>, SubstsRef<'tcx>, ty::ParamEnv<'tcx>) {
+impl<'tcx> Key for (DefId, Ty<'tcx>, GenericArgsRef<'tcx>, ty::ParamEnv<'tcx>) {
type CacheSelector = DefaultCacheSelector<Self>;
fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index a059590e6..94ae0dcb5 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -44,7 +44,6 @@ use crate::traits::{
};
use crate::ty::fast_reject::SimplifiedType;
use crate::ty::layout::ValidityRequirement;
-use crate::ty::subst::{GenericArg, SubstsRef};
use crate::ty::util::AlwaysRequiresDrop;
use crate::ty::GeneratorDiagnosticData;
use crate::ty::TyCtxtFeed;
@@ -52,6 +51,7 @@ use crate::ty::{
self, print::describe_as_module, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt,
UnusedGenericParams,
};
+use crate::ty::{GenericArg, GenericArgsRef};
use rustc_arena::TypedArena;
use rustc_ast as ast;
use rustc_ast::expand::{allocator::AllocatorKind, StrippedCfgItem};
@@ -67,7 +67,7 @@ use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, DocLinkResMap};
use rustc_hir::def_id::{
- CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId, LocalDefIdMap, LocalDefIdSet,
+ CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId, LocalDefIdMap, LocalDefIdSet, LocalModDefId,
};
use rustc_hir::lang_items::{LangItem, LanguageItems};
use rustc_hir::{Crate, ItemLocalId, TraitCandidate};
@@ -167,7 +167,7 @@ rustc_queries! {
///
/// This can be conveniently accessed by `tcx.hir().visit_item_likes_in_module`.
/// Avoid calling this query directly.
- query hir_module_items(key: LocalDefId) -> &'tcx rustc_middle::hir::ModuleItems {
+ query hir_module_items(key: LocalModDefId) -> &'tcx rustc_middle::hir::ModuleItems {
arena_cache
desc { |tcx| "getting HIR module items in `{}`", tcx.def_path_str(key) }
cache_on_disk_if { true }
@@ -231,7 +231,7 @@ rustc_queries! {
action = {
use rustc_hir::def::DefKind;
match tcx.def_kind(key) {
- DefKind::TyAlias => "expanding type alias",
+ DefKind::TyAlias { .. } => "expanding type alias",
DefKind::TraitAlias => "expanding trait alias",
_ => "computing type of",
}
@@ -388,7 +388,6 @@ rustc_queries! {
}
query shallow_lint_levels_on(key: hir::OwnerId) -> &'tcx rustc_middle::lint::ShallowLintLevelMap {
- eval_always // fetches `resolutions`
arena_cache
desc { |tcx| "looking up lint levels for `{}`", tcx.def_path_str(key) }
}
@@ -398,11 +397,6 @@ rustc_queries! {
desc { "computing `#[expect]`ed lints in this crate" }
}
- query parent_module_from_def_id(key: LocalDefId) -> LocalDefId {
- eval_always
- desc { |tcx| "getting the parent module of `{}`", tcx.def_path_str(key) }
- }
-
query expn_that_defined(key: DefId) -> rustc_span::ExpnId {
desc { |tcx| "getting the expansion that defined `{}`", tcx.def_path_str(key) }
separate_provide_extern
@@ -706,7 +700,7 @@ rustc_queries! {
separate_provide_extern
}
- query adt_sized_constraint(key: DefId) -> &'tcx [Ty<'tcx>] {
+ query adt_sized_constraint(key: DefId) -> ty::EarlyBinder<&'tcx ty::List<Ty<'tcx>>> {
desc { |tcx| "computing `Sized` constraints for `{}`", tcx.def_path_str(key) }
}
@@ -749,7 +743,7 @@ rustc_queries! {
separate_provide_extern
}
- /// Gets a map with the variance of every item; use `item_variance` instead.
+ /// Gets a map with the variance of every item; use `variances_of` instead.
query crate_variances(_: ()) -> &'tcx ty::CrateVariancesMap<'tcx> {
arena_cache
desc { "computing the variances for items in this crate" }
@@ -885,6 +879,13 @@ rustc_queries! {
desc { |tcx| "computing the implied bounds of `{}`", tcx.def_path_str(key) }
}
+ /// We need to store the assumed_wf_types for an RPITIT so that impls of foreign
+ /// traits with return-position impl trait in traits can inherit the right wf types.
+ query assumed_wf_types_for_rpitit(key: DefId) -> &'tcx [(Ty<'tcx>, Span)] {
+ desc { |tcx| "computing the implied bounds of `{}`", tcx.def_path_str(key) }
+ separate_provide_extern
+ }
+
/// Computes the signature of the function.
query fn_sig(key: DefId) -> ty::EarlyBinder<ty::PolyFnSig<'tcx>> {
desc { |tcx| "computing function signature of `{}`", tcx.def_path_str(key) }
@@ -894,40 +895,44 @@ rustc_queries! {
}
/// Performs lint checking for the module.
- query lint_mod(key: LocalDefId) -> () {
+ query lint_mod(key: LocalModDefId) -> () {
desc { |tcx| "linting {}", describe_as_module(key, tcx) }
}
+ query check_unused_traits(_: ()) -> () {
+ desc { "checking unused trait imports in crate" }
+ }
+
/// Checks the attributes in the module.
- query check_mod_attrs(key: LocalDefId) -> () {
+ query check_mod_attrs(key: LocalModDefId) -> () {
desc { |tcx| "checking attributes in {}", describe_as_module(key, tcx) }
}
/// Checks for uses of unstable APIs in the module.
- query check_mod_unstable_api_usage(key: LocalDefId) -> () {
+ query check_mod_unstable_api_usage(key: LocalModDefId) -> () {
desc { |tcx| "checking for unstable API usage in {}", describe_as_module(key, tcx) }
}
/// Checks the const bodies in the module for illegal operations (e.g. `if` or `loop`).
- query check_mod_const_bodies(key: LocalDefId) -> () {
+ query check_mod_const_bodies(key: LocalModDefId) -> () {
desc { |tcx| "checking consts in {}", describe_as_module(key, tcx) }
}
/// Checks the loops in the module.
- query check_mod_loops(key: LocalDefId) -> () {
+ query check_mod_loops(key: LocalModDefId) -> () {
desc { |tcx| "checking loops in {}", describe_as_module(key, tcx) }
}
- query check_mod_naked_functions(key: LocalDefId) -> () {
+ query check_mod_naked_functions(key: LocalModDefId) -> () {
desc { |tcx| "checking naked functions in {}", describe_as_module(key, tcx) }
}
- query check_mod_item_types(key: LocalDefId) -> () {
+ query check_mod_item_types(key: LocalModDefId) -> () {
desc { |tcx| "checking item types in {}", describe_as_module(key, tcx) }
}
- query check_mod_privacy(key: LocalDefId) -> () {
- desc { |tcx| "checking privacy in {}", describe_as_module(key, tcx) }
+ query check_mod_privacy(key: LocalModDefId) -> () {
+ desc { |tcx| "checking privacy in {}", describe_as_module(key.to_local_def_id(), tcx) }
}
query check_liveness(key: LocalDefId) {
@@ -946,19 +951,19 @@ rustc_queries! {
desc { "finding live symbols in crate" }
}
- query check_mod_deathness(key: LocalDefId) -> () {
+ query check_mod_deathness(key: LocalModDefId) -> () {
desc { |tcx| "checking deathness of variables in {}", describe_as_module(key, tcx) }
}
- query check_mod_impl_wf(key: LocalDefId) -> () {
+ query check_mod_impl_wf(key: LocalModDefId) -> () {
desc { |tcx| "checking that impls are well-formed in {}", describe_as_module(key, tcx) }
}
- query check_mod_type_wf(key: LocalDefId) -> () {
+ query check_mod_type_wf(key: LocalModDefId) -> () {
desc { |tcx| "checking that types are well-formed in {}", describe_as_module(key, tcx) }
}
- query collect_mod_item_types(key: LocalDefId) -> () {
+ query collect_mod_item_types(key: LocalModDefId) -> () {
desc { |tcx| "collecting item types in {}", describe_as_module(key, tcx) }
}
@@ -1032,7 +1037,7 @@ rustc_queries! {
}
/// Obtain all the calls into other local functions
- query mir_inliner_callees(key: ty::InstanceDef<'tcx>) -> &'tcx [(DefId, SubstsRef<'tcx>)] {
+ query mir_inliner_callees(key: ty::InstanceDef<'tcx>) -> &'tcx [(DefId, GenericArgsRef<'tcx>)] {
fatal_cycle
desc { |tcx|
"computing all local function calls in `{}`",
@@ -1273,7 +1278,7 @@ rustc_queries! {
query vtable_allocation(key: (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)) -> mir::interpret::AllocId {
desc { |tcx| "vtable const allocation for <{} as {}>",
key.0,
- key.1.map(|trait_ref| format!("{}", trait_ref)).unwrap_or("_".to_owned())
+ key.1.map(|trait_ref| format!("{trait_ref}")).unwrap_or("_".to_owned())
}
}
@@ -1537,7 +1542,7 @@ rustc_queries! {
/// added or removed in any upstream crate. Instead use the narrower
/// `upstream_monomorphizations_for`, `upstream_drop_glue_for`, or, even
/// better, `Instance::upstream_monomorphization()`.
- query upstream_monomorphizations(_: ()) -> &'tcx DefIdMap<FxHashMap<SubstsRef<'tcx>, CrateNum>> {
+ query upstream_monomorphizations(_: ()) -> &'tcx DefIdMap<FxHashMap<GenericArgsRef<'tcx>, CrateNum>> {
arena_cache
desc { "collecting available upstream monomorphizations" }
}
@@ -1550,7 +1555,7 @@ rustc_queries! {
/// You likely want to call `Instance::upstream_monomorphization()`
/// instead of invoking this query directly.
query upstream_monomorphizations_for(def_id: DefId)
- -> Option<&'tcx FxHashMap<SubstsRef<'tcx>, CrateNum>>
+ -> Option<&'tcx FxHashMap<GenericArgsRef<'tcx>, CrateNum>>
{
desc { |tcx|
"collecting available upstream monomorphizations for `{}`",
@@ -1560,7 +1565,7 @@ rustc_queries! {
}
/// Returns the upstream crate that exports drop-glue for the given
- /// type (`substs` is expected to be a single-item list containing the
+ /// type (`args` is expected to be a single-item list containing the
/// type one wants drop-glue for).
///
/// This is a subset of `upstream_monomorphizations_for` in order to
@@ -1574,17 +1579,22 @@ rustc_queries! {
/// NOTE: This query could easily be extended to also support other
/// common functions that have a large set of monomorphizations
/// (like `Clone::clone` for example).
- query upstream_drop_glue_for(substs: SubstsRef<'tcx>) -> Option<CrateNum> {
- desc { "available upstream drop-glue for `{:?}`", substs }
+ query upstream_drop_glue_for(args: GenericArgsRef<'tcx>) -> Option<CrateNum> {
+ desc { "available upstream drop-glue for `{:?}`", args }
}
/// Returns a list of all `extern` blocks of a crate.
- query foreign_modules(_: CrateNum) -> &'tcx FxHashMap<DefId, ForeignModule> {
+ query foreign_modules(_: CrateNum) -> &'tcx FxIndexMap<DefId, ForeignModule> {
arena_cache
desc { "looking up the foreign modules of a linked crate" }
separate_provide_extern
}
+ /// Lint against `extern fn` declarations having incompatible types.
+ query clashing_extern_declarations(_: ()) {
+ desc { "checking `extern fn` declarations are compatible" }
+ }
+
/// Identifies the entry-point (e.g., the `main` function) for a given
/// crate, returning `None` if there is no entry point (such as for library crates).
query entry_fn(_: ()) -> Option<(DefId, EntryFnType)> {
@@ -2053,16 +2063,16 @@ rustc_queries! {
desc { "normalizing `{:?}`", goal.value.value.value }
}
- query subst_and_check_impossible_predicates(key: (DefId, SubstsRef<'tcx>)) -> bool {
+ query subst_and_check_impossible_predicates(key: (DefId, GenericArgsRef<'tcx>)) -> bool {
desc { |tcx|
"checking impossible substituted predicates: `{}`",
tcx.def_path_str(key.0)
}
}
- query is_impossible_method(key: (DefId, DefId)) -> bool {
+ query is_impossible_associated_item(key: (DefId, DefId)) -> bool {
desc { |tcx|
- "checking if `{}` is impossible to call within `{}`",
+ "checking if `{}` is impossible to reference within `{}`",
tcx.def_path_str(key.1),
tcx.def_path_str(key.0),
}
@@ -2080,23 +2090,11 @@ rustc_queries! {
desc { "looking up supported target features" }
}
- /// Get an estimate of the size of an InstanceDef based on its MIR for CGU partitioning.
- query instance_def_size_estimate(def: ty::InstanceDef<'tcx>)
- -> usize {
- desc { |tcx| "estimating size for `{}`", tcx.def_path_str(def.def_id()) }
- }
-
query features_query(_: ()) -> &'tcx rustc_feature::Features {
feedable
desc { "looking up enabled feature gates" }
}
- query metadata_loader((): ()) -> &'tcx Steal<Box<rustc_session::cstore::MetadataLoaderDyn>> {
- feedable
- no_hash
- desc { "raw operations for metadata file access" }
- }
-
query crate_for_resolver((): ()) -> &'tcx Steal<(rustc_ast::Crate, rustc_ast::AttrVec)> {
feedable
no_hash
@@ -2104,16 +2102,16 @@ rustc_queries! {
}
/// Attempt to resolve the given `DefId` to an `Instance`, for the
- /// given generics args (`SubstsRef`), returning one of:
+ /// given generics args (`GenericArgsRef`), returning one of:
/// * `Ok(Some(instance))` on success
- /// * `Ok(None)` when the `SubstsRef` are still too generic,
+ /// * `Ok(None)` when the `GenericArgsRef` are still too generic,
/// and therefore don't allow finding the final `Instance`
/// * `Err(ErrorGuaranteed)` when the `Instance` resolution process
/// couldn't complete due to errors elsewhere - this is distinct
/// from `Ok(None)` to avoid misleading diagnostics when an error
/// has already been/will be emitted, for the original cause
query resolve_instance(
- key: ty::ParamEnvAnd<'tcx, (DefId, SubstsRef<'tcx>)>
+ key: ty::ParamEnvAnd<'tcx, (DefId, GenericArgsRef<'tcx>)>
) -> Result<Option<ty::Instance<'tcx>>, ErrorGuaranteed> {
desc { "resolving instance `{}`", ty::Instance::new(key.value.0, key.value.1) }
}
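
The `resolve_instance` documentation just above distinguishes three outcomes. As a rough illustration only (the real query takes a `ParamEnvAnd` key and runs inside the compiler), a caller-side match over that result shape might look like the following standalone sketch, where `Instance` and `ErrorGuaranteed` are plain stand-ins for the rustc types of the same names:

#[derive(Debug)]
struct Instance;
#[derive(Debug)]
struct ErrorGuaranteed;

type ResolveResult = Result<Option<Instance>, ErrorGuaranteed>;

fn handle(res: ResolveResult) {
    match res {
        // The generic args were concrete enough; we have a final Instance.
        Ok(Some(instance)) => println!("resolved: {instance:?}"),
        // The args are still too generic; defer instead of reporting an error.
        Ok(None) => println!("still polymorphic, cannot resolve yet"),
        // An error was already (or will be) emitted elsewhere; stay quiet here.
        Err(guar) => println!("resolution failed: {guar:?}"),
    }
}

fn main() {
    handle(Ok(Some(Instance)));
    handle(Ok(None));
    handle(Err(ErrorGuaranteed));
}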
diff --git a/compiler/rustc_middle/src/query/on_disk_cache.rs b/compiler/rustc_middle/src/query/on_disk_cache.rs
index 8751d3b78..995b2140f 100644
--- a/compiler/rustc_middle/src/query/on_disk_cache.rs
+++ b/compiler/rustc_middle/src/query/on_disk_cache.rs
@@ -104,7 +104,9 @@ struct Footer {
query_result_index: EncodedDepNodeIndex,
side_effects_index: EncodedDepNodeIndex,
// The location of all allocations.
- interpret_alloc_index: Vec<u32>,
+ // Most uses only need values up to u32::MAX, but benchmarking indicates that we can use a u64
+ // without measurable overhead. This permits larger const allocations without ICEing.
+ interpret_alloc_index: Vec<u64>,
// See `OnDiskCache.syntax_contexts`
syntax_contexts: FxHashMap<u32, AbsoluteBytePos>,
// See `OnDiskCache.expn_data`
@@ -301,7 +303,7 @@ impl<'sess> OnDiskCache<'sess> {
interpret_alloc_index.reserve(new_n - n);
for idx in n..new_n {
let id = encoder.interpret_allocs[idx];
- let pos: u32 = encoder.position().try_into().unwrap();
+ let pos: u64 = encoder.position().try_into().unwrap();
interpret_alloc_index.push(pos);
interpret::specialized_encode_alloc_id(&mut encoder, tcx, id);
}
diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs
index 97edfc2fc..a1aac2846 100644
--- a/compiler/rustc_middle/src/query/plumbing.rs
+++ b/compiler/rustc_middle/src/query/plumbing.rs
@@ -545,6 +545,7 @@ macro_rules! define_feedable {
mod sealed {
use super::{DefId, LocalDefId, OwnerId};
+ use rustc_hir::def_id::{LocalModDefId, ModDefId};
/// An analogue of the `Into` trait that's intended only for query parameters.
///
@@ -588,6 +589,27 @@ mod sealed {
self.to_def_id()
}
}
+
+ impl IntoQueryParam<DefId> for ModDefId {
+ #[inline(always)]
+ fn into_query_param(self) -> DefId {
+ self.to_def_id()
+ }
+ }
+
+ impl IntoQueryParam<DefId> for LocalModDefId {
+ #[inline(always)]
+ fn into_query_param(self) -> DefId {
+ self.to_def_id()
+ }
+ }
+
+ impl IntoQueryParam<LocalDefId> for LocalModDefId {
+ #[inline(always)]
+ fn into_query_param(self) -> LocalDefId {
+ self.into()
+ }
+ }
}
pub use sealed::IntoQueryParam;
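
The new `IntoQueryParam` impls above let `ModDefId` and `LocalModDefId` be passed wherever a query expects a `DefId` or `LocalDefId` key. A minimal, self-contained sketch of that conversion-trait pattern, using simplified stand-in id types rather than the rustc ones (in rustc the module is private, which is what makes the trait sealed; here everything is public so the sketch compiles on its own):

mod sealed {
    #[derive(Copy, Clone, Debug)]
    pub struct DefId(pub u32);
    #[derive(Copy, Clone, Debug)]
    pub struct ModDefId(pub DefId);

    // An `Into`-like trait used only for query parameters, so queries control
    // exactly which key types they accept.
    pub trait IntoQueryParam<P> {
        fn into_query_param(self) -> P;
    }

    impl IntoQueryParam<DefId> for DefId {
        #[inline(always)]
        fn into_query_param(self) -> DefId {
            self
        }
    }

    impl IntoQueryParam<DefId> for ModDefId {
        #[inline(always)]
        fn into_query_param(self) -> DefId {
            self.0
        }
    }
}

use sealed::{DefId, IntoQueryParam, ModDefId};

// A query-like function that accepts any key type convertible to `DefId`.
fn def_span(key: impl IntoQueryParam<DefId>) -> String {
    format!("span of {:?}", key.into_query_param())
}

fn main() {
    println!("{}", def_span(DefId(0)));
    println!("{}", def_span(ModDefId(DefId(1))));
}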
diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs
index e9af5070e..ebc1c1190 100644
--- a/compiler/rustc_middle/src/thir.rs
+++ b/compiler/rustc_middle/src/thir.rs
@@ -19,8 +19,8 @@ use rustc_middle::middle::region;
use rustc_middle::mir::interpret::AllocId;
use rustc_middle::mir::{self, BinOp, BorrowKind, FakeReadCause, Mutability, UnOp};
use rustc_middle::ty::adjustment::PointerCoercion;
-use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::{self, AdtDef, FnSig, List, Ty, UpvarSubsts};
+use rustc_middle::ty::GenericArgsRef;
+use rustc_middle::ty::{self, AdtDef, FnSig, List, Ty, UpvarArgs};
use rustc_middle::ty::{CanonicalUserType, CanonicalUserTypeAnnotation};
use rustc_span::def_id::LocalDefId;
use rustc_span::{sym, Span, Symbol, DUMMY_SP};
@@ -150,9 +150,9 @@ pub struct AdtExpr<'tcx> {
pub adt_def: AdtDef<'tcx>,
/// The variant of the ADT.
pub variant_index: VariantIdx,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
- /// Optional user-given substs: for something like `let x =
+ /// Optional user-given args: for something like `let x =
/// Bar::<T> { ... }`.
pub user_ty: UserTy<'tcx>,
@@ -164,7 +164,7 @@ pub struct AdtExpr<'tcx> {
#[derive(Clone, Debug, HashStable)]
pub struct ClosureExpr<'tcx> {
pub closure_id: LocalDefId,
- pub substs: UpvarSubsts<'tcx>,
+ pub args: UpvarArgs<'tcx>,
pub upvars: Box<[ExprId]>,
pub movability: Option<hir::Movability>,
pub fake_reads: Vec<(ExprId, FakeReadCause, hir::HirId)>,
@@ -346,6 +346,7 @@ pub enum ExprKind<'tcx> {
/// A `match` expression.
Match {
scrutinee: ExprId,
+ scrutinee_hir_id: hir::HirId,
arms: Box<[ArmId]>,
},
/// A block.
@@ -418,7 +419,7 @@ pub enum ExprKind<'tcx> {
/// An inline `const` block, e.g. `const {}`.
ConstBlock {
did: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
},
/// An array literal constructed from one repeated element, e.g. `[1; 5]`.
Repeat {
@@ -466,7 +467,7 @@ pub enum ExprKind<'tcx> {
/// Associated constants and named constants
NamedConst {
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
user_ty: UserTy<'tcx>,
},
ConstParam {
@@ -659,7 +660,7 @@ impl<'tcx> Pat<'tcx> {
impl<'tcx> IntoDiagnosticArg for Pat<'tcx> {
fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- format!("{}", self).into_diagnostic_arg()
+ format!("{self}").into_diagnostic_arg()
}
}
@@ -714,7 +715,7 @@ pub enum PatKind<'tcx> {
/// multiple variants.
Variant {
adt_def: AdtDef<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
variant_index: VariantIdx,
subpatterns: Vec<FieldPat<'tcx>>,
},
@@ -789,7 +790,7 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
match self.kind {
PatKind::Wild => write!(f, "_"),
- PatKind::AscribeUserType { ref subpattern, .. } => write!(f, "{}: _", subpattern),
+ PatKind::AscribeUserType { ref subpattern, .. } => write!(f, "{subpattern}: _"),
PatKind::Binding { mutability, name, mode, ref subpattern, .. } => {
let is_mut = match mode {
BindingMode::ByValue => mutability == Mutability::Mut,
@@ -801,9 +802,9 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
if is_mut {
write!(f, "mut ")?;
}
- write!(f, "{}", name)?;
+ write!(f, "{name}")?;
if let Some(ref subpattern) = *subpattern {
- write!(f, " @ {}", subpattern)?;
+ write!(f, " @ {subpattern}")?;
}
Ok(())
}
@@ -833,7 +834,7 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
};
if let Some((variant, name)) = &variant_and_name {
- write!(f, "{}", name)?;
+ write!(f, "{name}")?;
// Only for Adt we can have `S {...}`,
// which we handle separately here.
@@ -893,13 +894,13 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
}
_ => bug!("{} is a bad Deref pattern type", self.ty),
}
- write!(f, "{}", subpattern)
+ write!(f, "{subpattern}")
}
- PatKind::Constant { value } => write!(f, "{}", value),
+ PatKind::Constant { value } => write!(f, "{value}"),
PatKind::Range(box PatRange { lo, hi, end }) => {
- write!(f, "{}", lo)?;
- write!(f, "{}", end)?;
- write!(f, "{}", hi)
+ write!(f, "{lo}")?;
+ write!(f, "{end}")?;
+ write!(f, "{hi}")
}
PatKind::Slice { ref prefix, ref slice, ref suffix }
| PatKind::Array { ref prefix, ref slice, ref suffix } => {
@@ -911,7 +912,7 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
write!(f, "{}", start_or_comma())?;
match slice.kind {
PatKind::Wild => {}
- _ => write!(f, "{}", slice)?,
+ _ => write!(f, "{slice}")?,
}
write!(f, "..")?;
}
diff --git a/compiler/rustc_middle/src/thir/visit.rs b/compiler/rustc_middle/src/thir/visit.rs
index 14bc1ac0c..681400dbb 100644
--- a/compiler/rustc_middle/src/thir/visit.rs
+++ b/compiler/rustc_middle/src/thir/visit.rs
@@ -70,7 +70,7 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp
visitor.visit_expr(&visitor.thir()[expr]);
}
Loop { body } => visitor.visit_expr(&visitor.thir()[body]),
- Match { scrutinee, ref arms } => {
+ Match { scrutinee, ref arms, .. } => {
visitor.visit_expr(&visitor.thir()[scrutinee]);
for &arm in &**arms {
visitor.visit_arm(&visitor.thir()[arm]);
@@ -101,7 +101,7 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp
}
}
Become { value } => visitor.visit_expr(&visitor.thir()[value]),
- ConstBlock { did: _, substs: _ } => {}
+ ConstBlock { did: _, args: _ } => {}
Repeat { value, count: _ } => {
visitor.visit_expr(&visitor.thir()[value]);
}
@@ -115,7 +115,7 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp
ref base,
adt_def: _,
variant_index: _,
- substs: _,
+ args: _,
user_ty: _,
}) => {
for field in &**fields {
@@ -130,7 +130,7 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp
}
Closure(box ClosureExpr {
closure_id: _,
- substs: _,
+ args: _,
upvars: _,
movability: _,
fake_reads: _,
@@ -138,7 +138,7 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp
Literal { lit: _, neg: _ } => {}
NonHirLiteral { lit: _, user_ty: _ } => {}
ZstLiteral { user_ty: _ } => {}
- NamedConst { def_id: _, substs: _, user_ty: _ } => {}
+ NamedConst { def_id: _, args: _, user_ty: _ } => {}
ConstParam { param: _, def_id: _ } => {}
StaticRef { alloc_id: _, ty: _, def_id: _ } => {}
InlineAsm(box InlineAsmExpr { ref operands, template: _, options: _, line_spans: _ }) => {
@@ -227,7 +227,7 @@ pub fn walk_pat<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, pat: &Pat<'
name: _,
} => visitor.visit_pat(&subpattern),
Binding { .. } | Wild => {}
- Variant { subpatterns, adt_def: _, substs: _, variant_index: _ } | Leaf { subpatterns } => {
+ Variant { subpatterns, adt_def: _, args: _, variant_index: _ } | Leaf { subpatterns } => {
for subpattern in subpatterns {
visitor.visit_pat(&subpattern.pattern);
}
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
index c7d2e4c22..3465759b9 100644
--- a/compiler/rustc_middle/src/traits/mod.rs
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -12,7 +12,7 @@ pub mod util;
use crate::infer::canonical::Canonical;
use crate::mir::ConstraintCategory;
use crate::ty::abstract_const::NotConstEvaluatable;
-use crate::ty::subst::SubstsRef;
+use crate::ty::GenericArgsRef;
use crate::ty::{self, AdtKind, Ty, TyCtxt};
use rustc_data_structures::sync::Lrc;
@@ -199,7 +199,7 @@ impl<'tcx> ObligationCause<'tcx> {
pub struct UnifyReceiverContext<'tcx> {
pub assoc_item: ty::AssocItem,
pub param_env: ty::ParamEnv<'tcx>,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
}
#[derive(Clone, PartialEq, Eq, Lift, Default, HashStable)]
@@ -402,7 +402,7 @@ pub enum ObligationCauseCode<'tcx> {
OpaqueReturnType(Option<(Ty<'tcx>, Span)>),
/// Block implicit return
- BlockTailExpression(hir::HirId),
+ BlockTailExpression(hir::HirId, hir::MatchSource),
/// #[feature(trivial_bounds)] is not enabled
TrivialBound,
@@ -543,7 +543,6 @@ pub struct MatchExpressionArmCause<'tcx> {
pub scrut_span: Span,
pub source: hir::MatchSource,
pub prior_arms: Vec<Span>,
- pub scrut_hir_id: hir::HirId,
pub opt_suggest_box_span: Option<Span>,
}
@@ -649,43 +648,31 @@ pub enum ImplSource<'tcx, N> {
/// for some type parameter. The `Vec<N>` represents the
/// obligations incurred from normalizing the where-clause (if
/// any).
- Param(Vec<N>, ty::BoundConstness),
+ Param(Vec<N>),
- /// Virtual calls through an object.
- Object(ImplSourceObjectData<N>),
-
- /// Successful resolution for a builtin trait.
- Builtin(Vec<N>),
-
- /// ImplSource for trait upcasting coercion
- TraitUpcasting(ImplSourceTraitUpcastingData<N>),
+ /// Successful resolution for a builtin impl.
+ Builtin(BuiltinImplSource, Vec<N>),
}
impl<'tcx, N> ImplSource<'tcx, N> {
pub fn nested_obligations(self) -> Vec<N> {
match self {
ImplSource::UserDefined(i) => i.nested,
- ImplSource::Param(n, _) | ImplSource::Builtin(n) => n,
- ImplSource::Object(d) => d.nested,
- ImplSource::TraitUpcasting(d) => d.nested,
+ ImplSource::Param(n) | ImplSource::Builtin(_, n) => n,
}
}
pub fn borrow_nested_obligations(&self) -> &[N] {
match self {
ImplSource::UserDefined(i) => &i.nested,
- ImplSource::Param(n, _) | ImplSource::Builtin(n) => &n,
- ImplSource::Object(d) => &d.nested,
- ImplSource::TraitUpcasting(d) => &d.nested,
+ ImplSource::Param(n) | ImplSource::Builtin(_, n) => &n,
}
}
pub fn borrow_nested_obligations_mut(&mut self) -> &mut [N] {
match self {
ImplSource::UserDefined(i) => &mut i.nested,
- ImplSource::Param(n, _) | ImplSource::Builtin(n) => n,
- ImplSource::Object(d) => &mut d.nested,
- ImplSource::TraitUpcasting(d) => &mut d.nested,
+ ImplSource::Param(n) | ImplSource::Builtin(_, n) => n,
}
}
@@ -696,20 +683,12 @@ impl<'tcx, N> ImplSource<'tcx, N> {
match self {
ImplSource::UserDefined(i) => ImplSource::UserDefined(ImplSourceUserDefinedData {
impl_def_id: i.impl_def_id,
- substs: i.substs,
+ args: i.args,
nested: i.nested.into_iter().map(f).collect(),
}),
- ImplSource::Param(n, ct) => ImplSource::Param(n.into_iter().map(f).collect(), ct),
- ImplSource::Builtin(n) => ImplSource::Builtin(n.into_iter().map(f).collect()),
- ImplSource::Object(o) => ImplSource::Object(ImplSourceObjectData {
- vtable_base: o.vtable_base,
- nested: o.nested.into_iter().map(f).collect(),
- }),
- ImplSource::TraitUpcasting(d) => {
- ImplSource::TraitUpcasting(ImplSourceTraitUpcastingData {
- vtable_vptr_slot: d.vtable_vptr_slot,
- nested: d.nested.into_iter().map(f).collect(),
- })
+ ImplSource::Param(n) => ImplSource::Param(n.into_iter().map(f).collect()),
+ ImplSource::Builtin(source, n) => {
+ ImplSource::Builtin(source, n.into_iter().map(f).collect())
}
}
}
@@ -729,33 +708,35 @@ impl<'tcx, N> ImplSource<'tcx, N> {
#[derive(TypeFoldable, TypeVisitable)]
pub struct ImplSourceUserDefinedData<'tcx, N> {
pub impl_def_id: DefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
pub nested: Vec<N>,
}
-#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
-#[derive(TypeFoldable, TypeVisitable)]
-pub struct ImplSourceTraitUpcastingData<N> {
+#[derive(Copy, Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub enum BuiltinImplSource {
+ /// Some builtin impl we don't need to differentiate. This should be used
+ /// unless more specific information is necessary.
+ Misc,
+ /// A builtin impl for trait objects.
+ ///
+ /// The vtable is formed by concatenating together the method lists of
+ /// the base object trait and all supertraits; pointers to supertrait vtables
+ /// are provided when necessary. This is the start of `upcast_trait_ref`'s methods
+ /// in that vtable.
+ Object { vtable_base: usize },
/// The vtable is formed by concatenating together the method lists of
/// the base object trait and all supertraits; pointers to supertrait vtables
/// are provided when necessary. This is the position of `upcast_trait_ref`'s vtable
/// within that vtable.
- pub vtable_vptr_slot: Option<usize>,
-
- pub nested: Vec<N>,
+ TraitUpcasting { vtable_vptr_slot: Option<usize> },
+ /// Unsizing a tuple like `(A, B, ..., X)` to `(A, B, ..., Y)` if `X` unsizes to `Y`.
+ ///
+ /// This needs to be a separate variant as it is still unstable and we need to emit
+ /// a feature error when using it on stable.
+ TupleUnsizing,
}
-#[derive(PartialEq, Eq, Clone, TyEncodable, TyDecodable, HashStable, Lift)]
-#[derive(TypeFoldable, TypeVisitable)]
-pub struct ImplSourceObjectData<N> {
- /// The vtable is formed by concatenating together the method lists of
- /// the base object trait and all supertraits, pointers to supertrait vtable will
- /// be provided when necessary; this is the start of `upcast_trait_ref`'s methods
- /// in that vtable.
- pub vtable_base: usize,
-
- pub nested: Vec<N>,
-}
+TrivialTypeTraversalAndLiftImpls! { BuiltinImplSource }
#[derive(Clone, Debug, PartialEq, Eq, Hash, HashStable, PartialOrd, Ord)]
pub enum ObjectSafetyViolation {
@@ -795,49 +776,48 @@ impl ObjectSafetyViolation {
"where clause cannot reference non-lifetime `for<...>` variables".into()
}
ObjectSafetyViolation::Method(name, MethodViolationCode::StaticMethod(_), _) => {
- format!("associated function `{}` has no `self` parameter", name).into()
+ format!("associated function `{name}` has no `self` parameter").into()
}
ObjectSafetyViolation::Method(
name,
MethodViolationCode::ReferencesSelfInput(_),
DUMMY_SP,
- ) => format!("method `{}` references the `Self` type in its parameters", name).into(),
+ ) => format!("method `{name}` references the `Self` type in its parameters").into(),
ObjectSafetyViolation::Method(name, MethodViolationCode::ReferencesSelfInput(_), _) => {
- format!("method `{}` references the `Self` type in this parameter", name).into()
+ format!("method `{name}` references the `Self` type in this parameter").into()
}
ObjectSafetyViolation::Method(name, MethodViolationCode::ReferencesSelfOutput, _) => {
- format!("method `{}` references the `Self` type in its return type", name).into()
+ format!("method `{name}` references the `Self` type in its return type").into()
}
ObjectSafetyViolation::Method(
name,
MethodViolationCode::ReferencesImplTraitInTrait(_),
_,
- ) => format!("method `{}` references an `impl Trait` type in its return type", name)
- .into(),
+ ) => {
+ format!("method `{name}` references an `impl Trait` type in its return type").into()
+ }
ObjectSafetyViolation::Method(name, MethodViolationCode::AsyncFn, _) => {
- format!("method `{}` is `async`", name).into()
+ format!("method `{name}` is `async`").into()
}
ObjectSafetyViolation::Method(
name,
MethodViolationCode::WhereClauseReferencesSelf,
_,
- ) => {
- format!("method `{}` references the `Self` type in its `where` clause", name).into()
- }
+ ) => format!("method `{name}` references the `Self` type in its `where` clause").into(),
ObjectSafetyViolation::Method(name, MethodViolationCode::Generic, _) => {
- format!("method `{}` has generic type parameters", name).into()
+ format!("method `{name}` has generic type parameters").into()
}
ObjectSafetyViolation::Method(
name,
MethodViolationCode::UndispatchableReceiver(_),
_,
- ) => format!("method `{}`'s `self` parameter cannot be dispatched on", name).into(),
+ ) => format!("method `{name}`'s `self` parameter cannot be dispatched on").into(),
ObjectSafetyViolation::AssocConst(name, DUMMY_SP) => {
- format!("it contains associated `const` `{}`", name).into()
+ format!("it contains associated `const` `{name}`").into()
}
ObjectSafetyViolation::AssocConst(..) => "it contains this associated `const`".into(),
ObjectSafetyViolation::GAT(name, _) => {
- format!("it contains the generic associated type `{}`", name).into()
+ format!("it contains the generic associated type `{name}`").into()
}
}
}
@@ -855,8 +835,7 @@ impl ObjectSafetyViolation {
err.span_suggestion(
add_self_sugg.1,
format!(
- "consider turning `{}` into a method by giving it a `&self` argument",
- name
+ "consider turning `{name}` into a method by giving it a `&self` argument"
),
add_self_sugg.0.to_string(),
Applicability::MaybeIncorrect,
@@ -864,9 +843,8 @@ impl ObjectSafetyViolation {
err.span_suggestion(
make_sized_sugg.1,
format!(
- "alternatively, consider constraining `{}` so it does not apply to \
- trait objects",
- name
+ "alternatively, consider constraining `{name}` so it does not apply to \
+ trait objects"
),
make_sized_sugg.0.to_string(),
Applicability::MaybeIncorrect,
@@ -879,7 +857,7 @@ impl ObjectSafetyViolation {
) => {
err.span_suggestion(
*span,
- format!("consider changing method `{}`'s `self` parameter to be `&self`", name),
+ format!("consider changing method `{name}`'s `self` parameter to be `&self`"),
"&Self",
Applicability::MachineApplicable,
);
@@ -887,7 +865,7 @@ impl ObjectSafetyViolation {
ObjectSafetyViolation::AssocConst(name, _)
| ObjectSafetyViolation::GAT(name, _)
| ObjectSafetyViolation::Method(name, ..) => {
- err.help(format!("consider moving `{}` to another trait", name));
+ err.help(format!("consider moving `{name}` to another trait"));
}
}
}
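
In the `ImplSource` refactor in this file, the separate `Object` and `TraitUpcasting` variants are folded into `Builtin`, which now carries a `BuiltinImplSource` discriminant alongside the nested obligations. A simplified, self-contained sketch of the resulting shape (the types here are stand-ins, not the rustc definitions):

#[derive(Debug)]
enum BuiltinImplSource {
    Misc,
    Object { vtable_base: usize },
    TraitUpcasting { vtable_vptr_slot: Option<usize> },
    TupleUnsizing,
}

#[derive(Debug)]
enum ImplSource<N> {
    UserDefined(Vec<N>),
    Param(Vec<N>),
    Builtin(BuiltinImplSource, Vec<N>),
}

impl<N> ImplSource<N> {
    // With only three variants, the nested obligations come out of a single
    // match regardless of which builtin impl was selected.
    fn nested_obligations(self) -> Vec<N> {
        match self {
            ImplSource::UserDefined(n)
            | ImplSource::Param(n)
            | ImplSource::Builtin(_, n) => n,
        }
    }
}

fn main() {
    let source: ImplSource<&str> =
        ImplSource::Builtin(BuiltinImplSource::Object { vtable_base: 3 }, vec!["nested goal"]);
    println!("{:?}", source.nested_obligations());
}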
diff --git a/compiler/rustc_middle/src/traits/query.rs b/compiler/rustc_middle/src/traits/query.rs
index 60a38747f..950a59e96 100644
--- a/compiler/rustc_middle/src/traits/query.rs
+++ b/compiler/rustc_middle/src/traits/query.rs
@@ -8,7 +8,7 @@
use crate::error::DropCheckOverflow;
use crate::infer::canonical::{Canonical, QueryResponse};
use crate::ty::error::TypeError;
-use crate::ty::subst::GenericArg;
+use crate::ty::GenericArg;
use crate::ty::{self, Ty, TyCtxt};
use rustc_span::source_map::Span;
@@ -132,7 +132,7 @@ impl<'tcx> DropckOutlivesResult<'tcx> {
pub struct DropckConstraint<'tcx> {
/// Types that are required to be alive in order for this
/// type to be valid for destruction.
- pub outlives: Vec<ty::subst::GenericArg<'tcx>>,
+ pub outlives: Vec<ty::GenericArg<'tcx>>,
/// Types that could not be resolved: projections and params.
pub dtorck_types: Vec<Ty<'tcx>>,
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
index f2dda003b..ffae35798 100644
--- a/compiler/rustc_middle/src/traits/select.rs
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -127,6 +127,7 @@ pub enum SelectionCandidate<'tcx> {
/// an applicable bound in the trait definition. The `usize` is an index
/// into the list returned by `tcx.item_bounds`. The constness is the
/// constness of the bound in the trait.
+ // FIXME(effects) do we need this constness
ProjectionCandidate(usize, ty::BoundConstness),
/// Implementation of a `Fn`-family trait by one of the anonymous types
@@ -304,9 +305,7 @@ impl From<ErrorGuaranteed> for OverflowError {
}
}
-TrivialTypeTraversalAndLiftImpls! {
- OverflowError,
-}
+TrivialTypeTraversalAndLiftImpls! { OverflowError }
impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
fn from(overflow_error: OverflowError) -> SelectionError<'tcx> {
diff --git a/compiler/rustc_middle/src/traits/solve.rs b/compiler/rustc_middle/src/traits/solve.rs
index 73b332fd8..9d63d2918 100644
--- a/compiler/rustc_middle/src/traits/solve.rs
+++ b/compiler/rustc_middle/src/traits/solve.rs
@@ -1,7 +1,6 @@
use std::ops::ControlFlow;
use rustc_data_structures::intern::Interned;
-use rustc_query_system::cache::Cache;
use crate::infer::canonical::{CanonicalVarValues, QueryRegionConstraints};
use crate::traits::query::NoSolution;
@@ -11,9 +10,10 @@ use crate::ty::{
TypeVisitor,
};
+mod cache;
pub mod inspect;
-pub type EvaluationCache<'tcx> = Cache<CanonicalInput<'tcx>, QueryResult<'tcx>>;
+pub use cache::{CacheData, EvaluationCache};
/// A goal is a statement, i.e. `predicate`, we want to prove
/// given some assumptions, i.e. `param_env`.
@@ -57,6 +57,7 @@ pub enum Certainty {
impl Certainty {
pub const AMBIGUOUS: Certainty = Certainty::Maybe(MaybeCause::Ambiguity);
+ pub const OVERFLOW: Certainty = Certainty::Maybe(MaybeCause::Overflow);
/// Use this function to merge the certainty of multiple nested subgoals.
///
@@ -66,7 +67,7 @@ impl Certainty {
/// success, we merge these two responses. This results in ambiguity.
///
/// If we unify ambiguity with overflow, we return overflow. This doesn't matter
- /// inside of the solver as we distinguish ambiguity from overflow. It does
+ /// inside of the solver as we do not distinguish ambiguity from overflow. It does
/// however matter for diagnostics. If `T: Foo` resulted in overflow and `T: Bar`
/// in ambiguity without changing the inference state, we still want to tell the
/// user that `T: Baz` results in overflow.
@@ -146,7 +147,7 @@ impl<'tcx> std::ops::Deref for ExternalConstraints<'tcx> {
}
/// Additional constraints returned on success.
-#[derive(Debug, PartialEq, Eq, Clone, Hash, HashStable, Default)]
+#[derive(Debug, PartialEq, Eq, Clone, Hash, HashStable, Default, TypeVisitable, TypeFoldable)]
pub struct ExternalConstraintsData<'tcx> {
// FIXME: implement this.
pub region_constraints: QueryRegionConstraints<'tcx>,
diff --git a/compiler/rustc_middle/src/traits/solve/cache.rs b/compiler/rustc_middle/src/traits/solve/cache.rs
new file mode 100644
index 000000000..9898b0019
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/solve/cache.rs
@@ -0,0 +1,100 @@
+use super::{CanonicalInput, QueryResult};
+use crate::ty::TyCtxt;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::Lock;
+use rustc_query_system::cache::WithDepNode;
+use rustc_query_system::dep_graph::DepNodeIndex;
+use rustc_session::Limit;
+/// The trait solver cache used by `-Ztrait-solver=next`.
+///
+/// FIXME(@lcnr): link to some official documentation of how
+/// this works.
+#[derive(Default)]
+pub struct EvaluationCache<'tcx> {
+ map: Lock<FxHashMap<CanonicalInput<'tcx>, CacheEntry<'tcx>>>,
+}
+
+pub struct CacheData<'tcx> {
+ pub result: QueryResult<'tcx>,
+ pub reached_depth: usize,
+ pub encountered_overflow: bool,
+}
+
+impl<'tcx> EvaluationCache<'tcx> {
+ /// Insert a final result into the global cache.
+ pub fn insert(
+ &self,
+ key: CanonicalInput<'tcx>,
+ reached_depth: usize,
+ did_overflow: bool,
+ cycle_participants: FxHashSet<CanonicalInput<'tcx>>,
+ dep_node: DepNodeIndex,
+ result: QueryResult<'tcx>,
+ ) {
+ let mut map = self.map.borrow_mut();
+ let entry = map.entry(key).or_default();
+ let data = WithDepNode::new(dep_node, result);
+ entry.cycle_participants.extend(cycle_participants);
+ if did_overflow {
+ entry.with_overflow.insert(reached_depth, data);
+ } else {
+ entry.success = Some(Success { data, reached_depth });
+ }
+ }
+
+ /// Try to fetch a cached result, checking the recursion limit
+ /// and handling root goals of coinductive cycles.
+ ///
+ /// If this returns `Some` the cache result can be used.
+ pub fn get(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ key: CanonicalInput<'tcx>,
+ cycle_participant_in_stack: impl FnOnce(&FxHashSet<CanonicalInput<'tcx>>) -> bool,
+ available_depth: Limit,
+ ) -> Option<CacheData<'tcx>> {
+ let map = self.map.borrow();
+ let entry = map.get(&key)?;
+
+ if cycle_participant_in_stack(&entry.cycle_participants) {
+ return None;
+ }
+
+ if let Some(ref success) = entry.success {
+ if available_depth.value_within_limit(success.reached_depth) {
+ return Some(CacheData {
+ result: success.data.get(tcx),
+ reached_depth: success.reached_depth,
+ encountered_overflow: false,
+ });
+ }
+ }
+
+ entry.with_overflow.get(&available_depth.0).map(|e| CacheData {
+ result: e.get(tcx),
+ reached_depth: available_depth.0,
+ encountered_overflow: true,
+ })
+ }
+}
+
+struct Success<'tcx> {
+ data: WithDepNode<QueryResult<'tcx>>,
+ reached_depth: usize,
+}
+
+/// The cache entry for a goal `CanonicalInput`.
+///
+/// This contains results whose computation never hit the
+/// recursion limit in `success`, and all results which hit
+/// the recursion limit in `with_overflow`.
+#[derive(Default)]
+struct CacheEntry<'tcx> {
+ success: Option<Success<'tcx>>,
+ /// We have to be careful when caching roots of cycles.
+ ///
+ /// See the doc comment of `StackEntry::cycle_participants` for more
+ /// details.
+ cycle_participants: FxHashSet<CanonicalInput<'tcx>>,
+ with_overflow: FxHashMap<usize, WithDepNode<QueryResult<'tcx>>>,
+}
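
A stripped-down sketch of the cache layout introduced in this new file: each key keeps at most one result that stayed within the recursion limit, plus per-depth results that overflowed. The rustc-specific pieces (`TyCtxt`, `WithDepNode`, `Limit`, dep-node tracking, cycle participants) are replaced with plain stand-ins so the sketch runs on its own:

use std::collections::HashMap;

#[derive(Default)]
struct Entry {
    success: Option<(String, usize)>,      // (result, reached_depth)
    with_overflow: HashMap<usize, String>, // available_depth -> result
}

#[derive(Default)]
struct Cache {
    map: HashMap<&'static str, Entry>,
}

impl Cache {
    fn insert(&mut self, key: &'static str, reached_depth: usize, overflowed: bool, result: String) {
        let entry = self.map.entry(key).or_default();
        if overflowed {
            entry.with_overflow.insert(reached_depth, result);
        } else {
            entry.success = Some((result, reached_depth));
        }
    }

    fn get(&self, key: &str, available_depth: usize) -> Option<(&str, bool)> {
        let entry = self.map.get(key)?;
        // Prefer the non-overflowing result if the caller still has enough
        // recursion depth left to reuse it.
        if let Some((result, reached_depth)) = &entry.success {
            if *reached_depth <= available_depth {
                return Some((result.as_str(), false));
            }
        }
        entry.with_overflow.get(&available_depth).map(|r| (r.as_str(), true))
    }
}

fn main() {
    let mut cache = Cache::default();
    cache.insert("goal", 2, false, "yes".to_string());
    cache.insert("goal", 1, true, "maybe(overflow)".to_string());
    println!("{:?}", cache.get("goal", 5)); // reuses the success entry
    println!("{:?}", cache.get("goal", 1)); // falls back to the overflow entry
}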
diff --git a/compiler/rustc_middle/src/traits/solve/inspect.rs b/compiler/rustc_middle/src/traits/solve/inspect.rs
index 527afa005..4e2af3816 100644
--- a/compiler/rustc_middle/src/traits/solve/inspect.rs
+++ b/compiler/rustc_middle/src/traits/solve/inspect.rs
@@ -32,7 +32,7 @@ pub enum GoalEvaluationKind<'tcx> {
}
impl Debug for GoalEvaluation<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- ProofTreeFormatter { f, on_newline: true }.format_goal_evaluation(self)
+ ProofTreeFormatter::new(f).format_goal_evaluation(self)
}
}
@@ -43,7 +43,7 @@ pub struct AddedGoalsEvaluation<'tcx> {
}
impl Debug for AddedGoalsEvaluation<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- ProofTreeFormatter { f, on_newline: true }.format_nested_goal_evaluation(self)
+ ProofTreeFormatter::new(f).format_nested_goal_evaluation(self)
}
}
@@ -58,7 +58,7 @@ pub struct GoalEvaluationStep<'tcx> {
}
impl Debug for GoalEvaluationStep<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- ProofTreeFormatter { f, on_newline: true }.format_evaluation_step(self)
+ ProofTreeFormatter::new(f).format_evaluation_step(self)
}
}
@@ -75,9 +75,16 @@ pub enum CandidateKind<'tcx> {
NormalizedSelfTyAssembly,
/// A normal candidate for proving a goal
Candidate { name: String, result: QueryResult<'tcx> },
+ /// Used in the probe that wraps normalizing the non-self type for the unsize
+ /// trait, which is also structurally matched on.
+ UnsizeAssembly,
+ /// During upcasting from some source object to target object type, used to
+ /// do a probe to find out what projection type(s) may be used to prove that
+ /// the source type upholds all of the target type's object bounds.
+ UpcastProbe,
}
impl Debug for GoalCandidate<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- ProofTreeFormatter { f, on_newline: true }.format_candidate(self)
+ ProofTreeFormatter::new(f).format_candidate(self)
}
}
diff --git a/compiler/rustc_middle/src/traits/solve/inspect/format.rs b/compiler/rustc_middle/src/traits/solve/inspect/format.rs
index 2ee625674..8759fecb0 100644
--- a/compiler/rustc_middle/src/traits/solve/inspect/format.rs
+++ b/compiler/rustc_middle/src/traits/solve/inspect/format.rs
@@ -1,17 +1,25 @@
use super::*;
pub(super) struct ProofTreeFormatter<'a, 'b> {
- pub(super) f: &'a mut (dyn Write + 'b),
- pub(super) on_newline: bool,
+ f: &'a mut (dyn Write + 'b),
}
-impl Write for ProofTreeFormatter<'_, '_> {
+/// A formatter which adds 4 spaces of indentation to its input before
+/// passing it on to its nested formatter.
+///
+/// We can use this for arbitrary levels of indentation by nesting it.
+struct Indentor<'a, 'b> {
+ f: &'a mut (dyn Write + 'b),
+ on_newline: bool,
+}
+
+impl Write for Indentor<'_, '_> {
fn write_str(&mut self, s: &str) -> std::fmt::Result {
- for line in s.split_inclusive("\n") {
+ for line in s.split_inclusive('\n') {
if self.on_newline {
self.f.write_str(" ")?;
}
- self.on_newline = line.ends_with("\n");
+ self.on_newline = line.ends_with('\n');
self.f.write_str(line)?;
}
@@ -19,49 +27,52 @@ impl Write for ProofTreeFormatter<'_, '_> {
}
}
-impl ProofTreeFormatter<'_, '_> {
- fn nested(&mut self) -> ProofTreeFormatter<'_, '_> {
- ProofTreeFormatter { f: self, on_newline: true }
+impl<'a, 'b> ProofTreeFormatter<'a, 'b> {
+ pub(super) fn new(f: &'a mut (dyn Write + 'b)) -> Self {
+ ProofTreeFormatter { f }
}
- pub(super) fn format_goal_evaluation(&mut self, goal: &GoalEvaluation<'_>) -> std::fmt::Result {
- let f = &mut *self.f;
+ fn nested<F, R>(&mut self, func: F) -> R
+ where
+ F: FnOnce(&mut ProofTreeFormatter<'_, '_>) -> R,
+ {
+ func(&mut ProofTreeFormatter { f: &mut Indentor { f: self.f, on_newline: true } })
+ }
+ pub(super) fn format_goal_evaluation(&mut self, goal: &GoalEvaluation<'_>) -> std::fmt::Result {
let goal_text = match goal.is_normalizes_to_hack {
IsNormalizesToHack::Yes => "NORMALIZES-TO HACK GOAL",
IsNormalizesToHack::No => "GOAL",
};
- writeln!(f, "{}: {:?}", goal_text, goal.uncanonicalized_goal,)?;
- writeln!(f, "CANONICALIZED: {:?}", goal.canonicalized_goal)?;
+ writeln!(self.f, "{}: {:?}", goal_text, goal.uncanonicalized_goal)?;
+ writeln!(self.f, "CANONICALIZED: {:?}", goal.canonicalized_goal)?;
match &goal.kind {
GoalEvaluationKind::CacheHit(CacheHit::Global) => {
- writeln!(f, "GLOBAL CACHE HIT: {:?}", goal.result)
+ writeln!(self.f, "GLOBAL CACHE HIT: {:?}", goal.result)
}
GoalEvaluationKind::CacheHit(CacheHit::Provisional) => {
- writeln!(f, "PROVISIONAL CACHE HIT: {:?}", goal.result)
+ writeln!(self.f, "PROVISIONAL CACHE HIT: {:?}", goal.result)
}
GoalEvaluationKind::Uncached { revisions } => {
for (n, step) in revisions.iter().enumerate() {
- let f = &mut *self.f;
- writeln!(f, "REVISION {n}: {:?}", step.result)?;
- let mut f = self.nested();
- f.format_evaluation_step(step)?;
+ writeln!(self.f, "REVISION {n}: {:?}", step.result)?;
+ self.nested(|this| this.format_evaluation_step(step))?;
}
-
- let f = &mut *self.f;
- writeln!(f, "RESULT: {:?}", goal.result)
+ writeln!(self.f, "RESULT: {:?}", goal.result)
}
}?;
if goal.returned_goals.len() > 0 {
- let f = &mut *self.f;
- writeln!(f, "NESTED GOALS ADDED TO CALLER: [")?;
- let mut f = self.nested();
- for goal in goal.returned_goals.iter() {
- writeln!(f, "ADDED GOAL: {:?},", goal)?;
- }
+ writeln!(self.f, "NESTED GOALS ADDED TO CALLER: [")?;
+ self.nested(|this| {
+ for goal in goal.returned_goals.iter() {
+ writeln!(this.f, "ADDED GOAL: {goal:?},")?;
+ }
+ Ok(())
+ })?;
+
writeln!(self.f, "]")?;
}
@@ -72,58 +83,59 @@ impl ProofTreeFormatter<'_, '_> {
&mut self,
evaluation_step: &GoalEvaluationStep<'_>,
) -> std::fmt::Result {
- let f = &mut *self.f;
- writeln!(f, "INSTANTIATED: {:?}", evaluation_step.instantiated_goal)?;
+ writeln!(self.f, "INSTANTIATED: {:?}", evaluation_step.instantiated_goal)?;
for candidate in &evaluation_step.candidates {
- let mut f = self.nested();
- f.format_candidate(candidate)?;
+ self.nested(|this| this.format_candidate(candidate))?;
}
- for nested_goal_evaluation in &evaluation_step.nested_goal_evaluations {
- let mut f = self.nested();
- f.format_nested_goal_evaluation(nested_goal_evaluation)?;
+ for nested in &evaluation_step.nested_goal_evaluations {
+ self.nested(|this| this.format_nested_goal_evaluation(nested))?;
}
Ok(())
}
pub(super) fn format_candidate(&mut self, candidate: &GoalCandidate<'_>) -> std::fmt::Result {
- let f = &mut *self.f;
-
match &candidate.kind {
CandidateKind::NormalizedSelfTyAssembly => {
- writeln!(f, "NORMALIZING SELF TY FOR ASSEMBLY:")
+ writeln!(self.f, "NORMALIZING SELF TY FOR ASSEMBLY:")
+ }
+ CandidateKind::UnsizeAssembly => {
+ writeln!(self.f, "ASSEMBLING CANDIDATES FOR UNSIZING:")
+ }
+ CandidateKind::UpcastProbe => {
+ writeln!(self.f, "PROBING FOR PROJECTION COMPATIBILITY FOR UPCASTING:")
}
CandidateKind::Candidate { name, result } => {
- writeln!(f, "CANDIDATE {}: {:?}", name, result)
+ writeln!(self.f, "CANDIDATE {name}: {result:?}")
}
}?;
- let mut f = self.nested();
- for candidate in &candidate.candidates {
- f.format_candidate(candidate)?;
- }
- for nested_evaluations in &candidate.nested_goal_evaluations {
- f.format_nested_goal_evaluation(nested_evaluations)?;
- }
-
- Ok(())
+ self.nested(|this| {
+ for candidate in &candidate.candidates {
+ this.format_candidate(candidate)?;
+ }
+ for nested in &candidate.nested_goal_evaluations {
+ this.format_nested_goal_evaluation(nested)?;
+ }
+ Ok(())
+ })
}
pub(super) fn format_nested_goal_evaluation(
&mut self,
nested_goal_evaluation: &AddedGoalsEvaluation<'_>,
) -> std::fmt::Result {
- let f = &mut *self.f;
- writeln!(f, "TRY_EVALUATE_ADDED_GOALS: {:?}", nested_goal_evaluation.result)?;
+ writeln!(self.f, "TRY_EVALUATE_ADDED_GOALS: {:?}", nested_goal_evaluation.result)?;
for (n, revision) in nested_goal_evaluation.evaluations.iter().enumerate() {
- let f = &mut *self.f;
- writeln!(f, "REVISION {n}")?;
- let mut f = self.nested();
- for goal_evaluation in revision {
- f.format_goal_evaluation(goal_evaluation)?;
- }
+ writeln!(self.f, "REVISION {n}")?;
+ self.nested(|this| {
+ for goal_evaluation in revision {
+ this.format_goal_evaluation(goal_evaluation)?;
+ }
+ Ok(())
+ })?;
}
Ok(())
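
The `Indentor` introduced in this file is a `fmt::Write` wrapper that prepends four spaces at the start of every line and can be nested for deeper indentation. A self-contained sketch of the same trick (the identifiers mirror the ones above, but this is not the compiler code):

use std::fmt::{self, Write};

struct Indentor<'a> {
    f: &'a mut dyn Write,
    on_newline: bool,
}

impl Write for Indentor<'_> {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        for line in s.split_inclusive('\n') {
            if self.on_newline {
                self.f.write_str("    ")?;
            }
            self.on_newline = line.ends_with('\n');
            self.f.write_str(line)?;
        }
        Ok(())
    }
}

fn main() -> fmt::Result {
    let mut out = String::new();
    {
        // One level of indentation.
        let mut level1 = Indentor { f: &mut out, on_newline: true };
        writeln!(level1, "GOAL")?;
        {
            // Nesting adds another four spaces for everything written here.
            let mut level2 = Indentor { f: &mut level1, on_newline: true };
            writeln!(level2, "CANDIDATE")?;
        }
        writeln!(level1, "RESULT")?;
    }
    print!("{out}");
    Ok(())
}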
diff --git a/compiler/rustc_middle/src/traits/specialization_graph.rs b/compiler/rustc_middle/src/traits/specialization_graph.rs
index dc2cd2035..e48b46d12 100644
--- a/compiler/rustc_middle/src/traits/specialization_graph.rs
+++ b/compiler/rustc_middle/src/traits/specialization_graph.rs
@@ -43,7 +43,7 @@ impl Graph {
/// The parent of a given impl, which is the `DefId` of the trait when the
/// impl is a "specialization root".
pub fn parent(&self, child: DefId) -> DefId {
- *self.parent.get(&child).unwrap_or_else(|| panic!("Failed to get parent for {:?}", child))
+ *self.parent.get(&child).unwrap_or_else(|| panic!("Failed to get parent for {child:?}"))
}
}
@@ -259,7 +259,9 @@ pub fn ancestors(
if let Some(reported) = specialization_graph.has_errored {
Err(reported)
- } else if let Err(reported) = tcx.type_of(start_from_impl).subst_identity().error_reported() {
+ } else if let Err(reported) =
+ tcx.type_of(start_from_impl).instantiate_identity().error_reported()
+ {
Err(reported)
} else {
Ok(Ancestors {
diff --git a/compiler/rustc_middle/src/traits/structural_impls.rs b/compiler/rustc_middle/src/traits/structural_impls.rs
index a703e3c95..ec450cf55 100644
--- a/compiler/rustc_middle/src/traits/structural_impls.rs
+++ b/compiler/rustc_middle/src/traits/structural_impls.rs
@@ -6,18 +6,16 @@ use std::fmt;
impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSource<'tcx, N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match *self {
- super::ImplSource::UserDefined(ref v) => write!(f, "{:?}", v),
+ match self {
+ super::ImplSource::UserDefined(v) => write!(f, "{v:?}"),
- super::ImplSource::Builtin(ref d) => write!(f, "{:?}", d),
-
- super::ImplSource::Object(ref d) => write!(f, "{:?}", d),
-
- super::ImplSource::Param(ref n, ct) => {
- write!(f, "ImplSourceParamData({:?}, {:?})", n, ct)
+ super::ImplSource::Builtin(source, d) => {
+ write!(f, "Builtin({source:?}, {d:?})")
}
- super::ImplSource::TraitUpcasting(ref d) => write!(f, "{:?}", d),
+ super::ImplSource::Param(n) => {
+ write!(f, "ImplSourceParamData({n:?})")
+ }
}
}
}
@@ -26,28 +24,8 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceUserDefinedData<'tcx,
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
- "ImplSourceUserDefinedData(impl_def_id={:?}, substs={:?}, nested={:?})",
- self.impl_def_id, self.substs, self.nested
- )
- }
-}
-
-impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceTraitUpcastingData<N> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(
- f,
- "ImplSourceTraitUpcastingData(vtable_vptr_slot={:?}, nested={:?})",
- self.vtable_vptr_slot, self.nested
- )
- }
-}
-
-impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceObjectData<N> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(
- f,
- "ImplSourceObjectData(vtable_base={}, nested={:?})",
- self.vtable_base, self.nested
+ "ImplSourceUserDefinedData(impl_def_id={:?}, args={:?}, nested={:?})",
+ self.impl_def_id, self.args, self.nested
)
}
}
diff --git a/compiler/rustc_middle/src/ty/abstract_const.rs b/compiler/rustc_middle/src/ty/abstract_const.rs
index ffee7ba28..cdd835149 100644
--- a/compiler/rustc_middle/src/ty/abstract_const.rs
+++ b/compiler/rustc_middle/src/ty/abstract_const.rs
@@ -27,9 +27,7 @@ impl From<ErrorGuaranteed> for NotConstEvaluatable {
}
}
-TrivialTypeTraversalAndLiftImpls! {
- NotConstEvaluatable,
-}
+TrivialTypeTraversalAndLiftImpls! { NotConstEvaluatable }
pub type BoundAbstractConst<'tcx> = Result<Option<EarlyBinder<ty::Const<'tcx>>>, ErrorGuaranteed>;
@@ -55,8 +53,8 @@ impl<'tcx> TyCtxt<'tcx> {
ty::ConstKind::Unevaluated(uv) => match self.tcx.thir_abstract_const(uv.def) {
Err(e) => ty::Const::new_error(self.tcx, e, c.ty()),
Ok(Some(bac)) => {
- let substs = self.tcx.erase_regions(uv.substs);
- let bac = bac.subst(self.tcx, substs);
+ let args = self.tcx.erase_regions(uv.args);
+ let bac = bac.instantiate(self.tcx, args);
return bac.fold_with(self);
}
Ok(None) => c,
diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs
index e067d2a98..b4c6e0d97 100644
--- a/compiler/rustc_middle/src/ty/adt.rs
+++ b/compiler/rustc_middle/src/ty/adt.rs
@@ -448,7 +448,7 @@ impl<'tcx> AdtDef<'tcx> {
Res::Def(DefKind::Ctor(..), cid) => self.variant_with_ctor_id(cid),
Res::Def(DefKind::Struct, _)
| Res::Def(DefKind::Union, _)
- | Res::Def(DefKind::TyAlias, _)
+ | Res::Def(DefKind::TyAlias { .. }, _)
| Res::Def(DefKind::AssocTy, _)
| Res::SelfTyParam { .. }
| Res::SelfTyAlias { .. }
@@ -562,18 +562,10 @@ impl<'tcx> AdtDef<'tcx> {
tcx.adt_destructor(self.did())
}
- /// Returns a list of types such that `Self: Sized` if and only
- /// if that type is `Sized`, or `TyErr` if this type is recursive.
- ///
- /// Oddly enough, checking that the sized-constraint is `Sized` is
- /// actually more expressive than checking all members:
- /// the `Sized` trait is inductive, so an associated type that references
- /// `Self` would prevent its containing ADT from being `Sized`.
- ///
- /// Due to normalization being eager, this applies even if
- /// the associated type is behind a pointer (e.g., issue #31299).
- pub fn sized_constraint(self, tcx: TyCtxt<'tcx>) -> ty::EarlyBinder<&'tcx [Ty<'tcx>]> {
- ty::EarlyBinder::bind(tcx.adt_sized_constraint(self.did()))
+ /// Returns a list of types such that `Self: Sized` if and only if that
+ /// type is `Sized`, or `ty::Error` if this type has a recursive layout.
+ pub fn sized_constraint(self, tcx: TyCtxt<'tcx>) -> ty::EarlyBinder<&'tcx ty::List<Ty<'tcx>>> {
+ tcx.adt_sized_constraint(self.did())
}
}
diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs
index cce609c26..f77a8c671 100644
--- a/compiler/rustc_middle/src/ty/assoc.rs
+++ b/compiler/rustc_middle/src/ty/assoc.rs
@@ -84,14 +84,22 @@ impl AssocItem {
// late-bound regions, and we don't want method signatures to show up
// `as for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound
// regions just fine, showing `fn(&MyType)`.
- tcx.fn_sig(self.def_id).subst_identity().skip_binder().to_string()
+ tcx.fn_sig(self.def_id).instantiate_identity().skip_binder().to_string()
}
ty::AssocKind::Type => format!("type {};", self.name),
ty::AssocKind::Const => {
- format!("const {}: {:?};", self.name, tcx.type_of(self.def_id).subst_identity())
+ format!(
+ "const {}: {:?};",
+ self.name,
+ tcx.type_of(self.def_id).instantiate_identity()
+ )
}
}
}
+
+ pub fn is_impl_trait_in_trait(&self) -> bool {
+ self.opt_rpitit_info.is_some()
+ }
}
#[derive(Copy, Clone, PartialEq, Debug, HashStable, Eq, Hash, Encodable, Decodable)]
diff --git a/compiler/rustc_middle/src/ty/binding.rs b/compiler/rustc_middle/src/ty/binding.rs
index a5b05a4f9..2fec8ac90 100644
--- a/compiler/rustc_middle/src/ty/binding.rs
+++ b/compiler/rustc_middle/src/ty/binding.rs
@@ -6,7 +6,7 @@ pub enum BindingMode {
BindByValue(Mutability),
}
-TrivialTypeTraversalAndLiftImpls! { BindingMode, }
+TrivialTypeTraversalAndLiftImpls! { BindingMode }
impl BindingMode {
pub fn convert(BindingAnnotation(by_ref, mutbl): BindingAnnotation) -> BindingMode {
diff --git a/compiler/rustc_middle/src/ty/closure.rs b/compiler/rustc_middle/src/ty/closure.rs
index bc9273745..74bdd07a1 100644
--- a/compiler/rustc_middle/src/ty/closure.rs
+++ b/compiler/rustc_middle/src/ty/closure.rs
@@ -6,9 +6,11 @@ use crate::{mir, ty};
use std::fmt::Write;
use crate::query::Providers;
-use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::{self as hir, LangItem};
+use rustc_span::def_id::LocalDefIdMap;
use rustc_span::symbol::Ident;
use rustc_span::{Span, Symbol};
@@ -56,12 +58,9 @@ pub enum UpvarCapture {
ByRef(BorrowKind),
}
-pub type UpvarListMap = FxHashMap<DefId, FxIndexMap<hir::HirId, UpvarId>>;
-pub type UpvarCaptureMap = FxHashMap<UpvarId, UpvarCapture>;
-
/// Given the closure DefId this map provides a map of root variables to minimum
/// set of `CapturedPlace`s that need to be tracked to support all captures of that closure.
-pub type MinCaptureInformationMap<'tcx> = FxHashMap<LocalDefId, RootVariableMinCaptureList<'tcx>>;
+pub type MinCaptureInformationMap<'tcx> = LocalDefIdMap<RootVariableMinCaptureList<'tcx>>;
/// Part of `MinCaptureInformationMap`; Maps a root variable to the list of `CapturedPlace`.
/// Used to track the minimum set of `Place`s that need to be captured to support all
@@ -91,10 +90,18 @@ pub enum ClosureKind {
FnOnce,
}
-impl<'tcx> ClosureKind {
+impl ClosureKind {
/// This is the initial value used when doing upvar inference.
pub const LATTICE_BOTTOM: ClosureKind = ClosureKind::Fn;
+ pub const fn as_str(self) -> &'static str {
+ match self {
+ ClosureKind::Fn => "Fn",
+ ClosureKind::FnMut => "FnMut",
+ ClosureKind::FnOnce => "FnOnce",
+ }
+ }
+
/// Returns `true` if a type that impls this closure kind
/// must also implement `other`.
pub fn extends(self, other: ty::ClosureKind) -> bool {
@@ -117,7 +124,7 @@ impl<'tcx> ClosureKind {
/// Returns the representative scalar type for this closure kind.
/// See `Ty::to_opt_closure_kind` for more details.
- pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ pub fn to_ty<'tcx>(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
match self {
ClosureKind::Fn => tcx.types.i8,
ClosureKind::FnMut => tcx.types.i16,
@@ -126,6 +133,12 @@ impl<'tcx> ClosureKind {
}
}
+impl IntoDiagnosticArg for ClosureKind {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(self.as_str().into())
+ }
+}
+
/// A composite describing a `Place` that is captured by a closure.
#[derive(PartialEq, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
@@ -176,6 +189,8 @@ impl<'tcx> CapturedPlace<'tcx> {
// Ignore derefs for now, as they are likely caused by
// autoderefs that don't appear in the original code.
HirProjectionKind::Deref => {}
+ // Just change the type to the hidden type, so we can actually project.
+ HirProjectionKind::OpaqueCast => {}
proj => bug!("Unexpected projection {:?} in captured place", proj),
}
ty = proj.ty;
@@ -350,7 +365,7 @@ pub fn place_to_string_for_capture<'tcx>(tcx: TyCtxt<'tcx>, place: &HirPlace<'tc
for (i, proj) in place.projections.iter().enumerate() {
match proj.kind {
HirProjectionKind::Deref => {
- curr_string = format!("*{}", curr_string);
+ curr_string = format!("*{curr_string}");
}
HirProjectionKind::Field(idx, variant) => match place.ty_before_projection(i).kind() {
ty::Adt(def, ..) => {
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
index 6adbb44a1..7c05deae9 100644
--- a/compiler/rustc_middle/src/ty/codec.rs
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -13,7 +13,7 @@ use crate::mir::{
interpret::{AllocId, ConstAllocation},
};
use crate::traits;
-use crate::ty::subst::SubstsRef;
+use crate::ty::GenericArgsRef;
use crate::ty::{self, AdtDef, Ty};
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty::TyCtxt;
@@ -168,7 +168,6 @@ impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ty::ParamEnv<'tcx> {
fn encode(&self, e: &mut E) {
self.caller_bounds().encode(e);
self.reveal().encode(e);
- self.constness().encode(e);
}
}
@@ -254,12 +253,12 @@ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Clause<'tcx> {
}
}
-impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for SubstsRef<'tcx> {
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for GenericArgsRef<'tcx> {
fn decode(decoder: &mut D) -> Self {
let len = decoder.read_usize();
let tcx = decoder.interner();
- tcx.mk_substs_from_iter(
- (0..len).map::<ty::subst::GenericArg<'tcx>, _>(|_| Decodable::decode(decoder)),
+ tcx.mk_args_from_iter(
+ (0..len).map::<ty::GenericArg<'tcx>, _>(|_| Decodable::decode(decoder)),
)
}
}
@@ -306,8 +305,7 @@ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::ParamEnv<'tcx> {
fn decode(d: &mut D) -> Self {
let caller_bounds = Decodable::decode(d);
let reveal = Decodable::decode(d);
- let constness = Decodable::decode(d);
- ty::ParamEnv::new(caller_bounds, reveal, constness)
+ ty::ParamEnv::new(caller_bounds, reveal)
}
}
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
index 1cbfe99f8..cce10417e 100644
--- a/compiler/rustc_middle/src/ty/consts.rs
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -1,6 +1,6 @@
use crate::middle::resolve_bound_vars as rbv;
use crate::mir::interpret::{AllocId, ConstValue, LitToConstInput, Scalar};
-use crate::ty::{self, InternalSubsts, ParamEnv, ParamEnvAnd, Ty, TyCtxt, TypeVisitableExt};
+use crate::ty::{self, GenericArgs, ParamEnv, ParamEnvAnd, Ty, TyCtxt, TypeVisitableExt};
use rustc_data_structures::intern::Interned;
use rustc_error_messages::MultiSpan;
use rustc_hir as hir;
@@ -171,7 +171,7 @@ impl<'tcx> Const<'tcx> {
tcx,
ty::UnevaluatedConst {
def: def.to_def_id(),
- substs: InternalSubsts::identity_for_item(tcx, def.to_def_id()),
+ args: GenericArgs::identity_for_item(tcx, def.to_def_id()),
},
ty,
),
@@ -212,7 +212,7 @@ impl<'tcx> Const<'tcx> {
Err(e) => {
tcx.sess.delay_span_bug(
expr.span,
- format!("Const::from_anon_const: couldn't lit_to_const {:?}", e),
+ format!("Const::from_anon_const: couldn't lit_to_const {e:?}"),
);
}
}
@@ -225,7 +225,7 @@ impl<'tcx> Const<'tcx> {
)) => {
// Use the type from the param's definition, since we can resolve it,
// not the expected parameter type from WithOptConstParam.
- let param_ty = tcx.type_of(def_id).subst_identity();
+ let param_ty = tcx.type_of(def_id).instantiate_identity();
match tcx.named_bound_var(expr.hir_id) {
Some(rbv::ResolvedArg::EarlyBound(_)) => {
// Find the name and index of the const parameter by indexing the generics of
@@ -267,7 +267,7 @@ impl<'tcx> Const<'tcx> {
pub fn from_bits(tcx: TyCtxt<'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>) -> Self {
let size = tcx
.layout_of(ty)
- .unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
+ .unwrap_or_else(|e| panic!("could not compute layout for {ty:?}: {e:?}"))
.size;
ty::Const::new_value(
tcx,
@@ -294,6 +294,14 @@ impl<'tcx> Const<'tcx> {
Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize))
}
+ /// Attempts to convert to a `ValTree`
+ pub fn try_to_valtree(self) -> Option<ty::ValTree<'tcx>> {
+ match self.kind() {
+ ty::ConstKind::Value(valtree) => Some(valtree),
+ _ => None,
+ }
+ }
+
#[inline]
/// Attempts to evaluate the given constant to bits. Can fail to evaluate in the presence of
/// generics (or erroneous code) or if the value can't be represented as bits (e.g. because it
@@ -406,14 +414,14 @@ impl<'tcx> Const<'tcx> {
// any region variables.
// HACK(eddyb) when the query key would contain inference variables,
- // attempt using identity substs and `ParamEnv` instead, that will succeed
+ // attempt using identity args and `ParamEnv` instead, that will succeed
// when the expression doesn't depend on any parameters.
// FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that
// we can call `infcx.const_eval_resolve` which handles inference variables.
let param_env_and = if (param_env, unevaluated).has_non_region_infer() {
tcx.param_env(unevaluated.def).and(ty::UnevaluatedConst {
def: unevaluated.def,
- substs: InternalSubsts::identity_for_item(tcx, unevaluated.def),
+ args: GenericArgs::identity_for_item(tcx, unevaluated.def),
})
} else {
tcx.erase_regions(param_env)
@@ -430,8 +438,8 @@ impl<'tcx> Const<'tcx> {
EvalMode::Typeck => {
match tcx.const_eval_resolve_for_typeck(param_env, unevaluated, None) {
// NOTE(eddyb) `val` contains no lifetimes/types/consts,
- // and we use the original type, so nothing from `substs`
- // (which may be identity substs, see above),
+ // and we use the original type, so nothing from `args`
+ // (which may be identity args, see above),
// can leak through `val` into the const we return.
Ok(val) => Some(Ok(EvalResult::ValTree(val?))),
Err(ErrorHandled::TooGeneric) => None,
@@ -441,8 +449,8 @@ impl<'tcx> Const<'tcx> {
EvalMode::Mir => {
match tcx.const_eval_resolve(param_env, unevaluated.expand(), None) {
// NOTE(eddyb) `val` contains no lifetimes/types/consts,
- // and we use the original type, so nothing from `substs`
- // (which may be identity substs, see above),
+ // and we use the original type, so nothing from `args`
+ // (which may be identity args, see above),
// can leak through `val` into the const we return.
Ok(val) => Some(Ok(EvalResult::ConstVal(val))),
Err(ErrorHandled::TooGeneric) => None,
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
index 1e43fab45..b16163edf 100644
--- a/compiler/rustc_middle/src/ty/consts/int.rs
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -418,7 +418,7 @@ impl TryFrom<ScalarInt> for char {
#[inline]
fn try_from(int: ScalarInt) -> Result<Self, Self::Error> {
- let Ok(bits) = int.to_bits(Size::from_bytes(std::mem::size_of::<char>())) else {
+ let Ok(bits) = int.to_bits(Size::from_bytes(std::mem::size_of::<char>())) else {
return Err(CharTryFromScalarInt);
};
match char::from_u32(bits.try_into().unwrap()) {
@@ -463,7 +463,7 @@ impl TryFrom<ScalarInt> for Double {
impl fmt::Debug for ScalarInt {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Dispatch to LowerHex below.
- write!(f, "0x{:x}", self)
+ write!(f, "0x{self:x}")
}
}
diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs
index a6bf74911..db4a15fbe 100644
--- a/compiler/rustc_middle/src/ty/consts/kind.rs
+++ b/compiler/rustc_middle/src/ty/consts/kind.rs
@@ -1,41 +1,41 @@
use super::Const;
use crate::mir;
use crate::ty::abstract_const::CastKind;
-use crate::ty::subst::SubstsRef;
+use crate::ty::GenericArgsRef;
use crate::ty::{self, List, Ty};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
/// An unevaluated (potentially generic) constant used in the type-system.
-#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
+#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
pub struct UnevaluatedConst<'tcx> {
pub def: DefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
}
impl rustc_errors::IntoDiagnosticArg for UnevaluatedConst<'_> {
fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
- format!("{:?}", self).into_diagnostic_arg()
+ format!("{self:?}").into_diagnostic_arg()
}
}
impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
pub fn expand(self) -> mir::UnevaluatedConst<'tcx> {
- mir::UnevaluatedConst { def: self.def, substs: self.substs, promoted: None }
+ mir::UnevaluatedConst { def: self.def, args: self.args, promoted: None }
}
}
impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
- pub fn new(def: DefId, substs: SubstsRef<'tcx>) -> UnevaluatedConst<'tcx> {
- UnevaluatedConst { def, substs }
+ pub fn new(def: DefId, args: GenericArgsRef<'tcx>) -> UnevaluatedConst<'tcx> {
+ UnevaluatedConst { def, args }
}
}
-#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
+#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[derive(HashStable, TyEncodable, TyDecodable, TypeVisitable, TypeFoldable)]
pub enum Expr<'tcx> {
Binop(mir::BinOp, Const<'tcx>, Const<'tcx>),
diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs
index 8b96864dd..fb7bf78ba 100644
--- a/compiler/rustc_middle/src/ty/consts/valtree.rs
+++ b/compiler/rustc_middle/src/ty/consts/valtree.rs
@@ -24,7 +24,7 @@ pub enum ValTree<'tcx> {
Leaf(ScalarInt),
//SliceOrStr(ValSlice<'tcx>),
- // dont use SliceOrStr for now
+ // don't use SliceOrStr for now
/// The fields of any kind of aggregate. Structs, tuples and arrays are represented by
/// listing their fields' values in order.
///
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 035e978f6..be839e03c 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -30,7 +30,7 @@ use crate::ty::{
Predicate, PredicateKind, Region, RegionKind, ReprOptions, TraitObjectVisitor, Ty, TyKind,
TyVid, TypeAndMut, Visibility,
};
-use crate::ty::{GenericArg, InternalSubsts, SubstsRef};
+use crate::ty::{GenericArg, GenericArgs, GenericArgsRef};
use rustc_ast::{self as ast, attr};
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
@@ -50,9 +50,7 @@ use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
use rustc_hir::intravisit::Visitor;
use rustc_hir::lang_items::LangItem;
-use rustc_hir::{
- Constness, ExprKind, HirId, ImplItemKind, ItemKind, Node, TraitCandidate, TraitItemKind,
-};
+use rustc_hir::{Constness, HirId, Node, TraitCandidate};
use rustc_index::IndexVec;
use rustc_macros::HashStable;
use rustc_query_system::dep_graph::DepNodeIndex;
@@ -61,8 +59,7 @@ use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::config::CrateType;
use rustc_session::cstore::{CrateStoreDyn, Untracked};
use rustc_session::lint::Lint;
-use rustc_session::Limit;
-use rustc_session::Session;
+use rustc_session::{Limit, MetadataKind, Session};
use rustc_span::def_id::{DefPathHash, StableCrateId};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
@@ -84,7 +81,7 @@ use std::ops::{Bound, Deref};
#[allow(rustc::usage_of_ty_tykind)]
impl<'tcx> Interner for TyCtxt<'tcx> {
type AdtDef = ty::AdtDef<'tcx>;
- type SubstsRef = ty::SubstsRef<'tcx>;
+ type GenericArgsRef = ty::GenericArgsRef<'tcx>;
type DefId = DefId;
type Binder<T> = Binder<'tcx, T>;
type Ty = Ty<'tcx>;
@@ -142,7 +139,7 @@ pub struct CtxtInterners<'tcx> {
// they're accessed quite often.
type_: InternedSet<'tcx, WithCachedTypeInfo<TyKind<'tcx>>>,
const_lists: InternedSet<'tcx, List<ty::Const<'tcx>>>,
- substs: InternedSet<'tcx, InternalSubsts<'tcx>>,
+ args: InternedSet<'tcx, GenericArgs<'tcx>>,
type_lists: InternedSet<'tcx, List<Ty<'tcx>>>,
canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo<'tcx>>>,
region: InternedSet<'tcx, RegionKind<'tcx>>,
@@ -167,7 +164,7 @@ impl<'tcx> CtxtInterners<'tcx> {
arena,
type_: Default::default(),
const_lists: Default::default(),
- substs: Default::default(),
+ args: Default::default(),
type_lists: Default::default(),
region: Default::default(),
poly_existential_predicates: Default::default(),
@@ -529,6 +526,13 @@ pub struct GlobalCtxt<'tcx> {
interners: CtxtInterners<'tcx>,
pub sess: &'tcx Session,
+ crate_types: Vec<CrateType>,
+ /// The `stable_crate_id` is constructed out of the crate name and all the
+ /// `-C metadata` arguments passed to the compiler. Its value forms a unique
+ /// global identifier for the crate. It is used to allow multiple crates
+ /// with the same name to coexist. See the
+ /// `rustc_symbol_mangling` crate for more information.
+ stable_crate_id: StableCrateId,
/// This only ever stores a `LintStore` but we don't want a dependency on that type here.
///
@@ -569,6 +573,7 @@ pub struct GlobalCtxt<'tcx> {
/// Caches the results of goal evaluation in the new solver.
pub new_solver_evaluation_cache: solve::EvaluationCache<'tcx>,
+ pub new_solver_coherence_evaluation_cache: solve::EvaluationCache<'tcx>,
/// Data layout specification for the current target.
pub data_layout: TargetDataLayout,
@@ -680,12 +685,16 @@ impl<'tcx> TyCtxt<'tcx> {
value.lift_to_tcx(self)
}
- /// Creates a type context and call the closure with a `TyCtxt` reference
- /// to the context. The closure enforces that the type context and any interned
- /// value (types, substs, etc.) can only be used while `ty::tls` has a valid
- /// reference to the context, to allow formatting values that need it.
+ /// Creates a type context. To use the context call `fn enter` which
+ /// provides a `TyCtxt`.
+ ///
+ /// By only providing the `TyCtxt` inside of the closure we enforce that the type
+ /// context and any interned value (types, args, etc.) can only be used while `ty::tls`
+ /// has a valid reference to the context, to allow formatting values that need it.
pub fn create_global_ctxt(
s: &'tcx Session,
+ crate_types: Vec<CrateType>,
+ stable_crate_id: StableCrateId,
lint_store: Lrc<dyn Any + sync::DynSend + sync::DynSync>,
arena: &'tcx WorkerLocal<Arena<'tcx>>,
hir_arena: &'tcx WorkerLocal<hir::Arena<'tcx>>,
@@ -704,6 +713,8 @@ impl<'tcx> TyCtxt<'tcx> {
GlobalCtxt {
sess: s,
+ crate_types,
+ stable_crate_id,
lint_store,
arena,
hir_arena,
@@ -721,6 +732,7 @@ impl<'tcx> TyCtxt<'tcx> {
selection_cache: Default::default(),
evaluation_cache: Default::default(),
new_solver_evaluation_cache: Default::default(),
+ new_solver_coherence_evaluation_cache: Default::default(),
data_layout,
alloc_map: Lock::new(interpret::AllocMap::new()),
}
@@ -799,9 +811,46 @@ impl<'tcx> TyCtxt<'tcx> {
}
#[inline]
+ pub fn crate_types(self) -> &'tcx [CrateType] {
+ &self.crate_types
+ }
+
+ pub fn metadata_kind(self) -> MetadataKind {
+ self.crate_types()
+ .iter()
+ .map(|ty| match *ty {
+ CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => {
+ MetadataKind::None
+ }
+ CrateType::Rlib => MetadataKind::Uncompressed,
+ CrateType::Dylib | CrateType::ProcMacro => MetadataKind::Compressed,
+ })
+ .max()
+ .unwrap_or(MetadataKind::None)
+ }
+
+ pub fn needs_metadata(self) -> bool {
+ self.metadata_kind() != MetadataKind::None
+ }
+
+ pub fn needs_crate_hash(self) -> bool {
+ // Why is the crate hash needed for these configurations?
+ // - debug_assertions: for the "fingerprint the result" check in
+ // `rustc_query_system::query::plumbing::execute_job`.
+ // - incremental: for query lookups.
+ // - needs_metadata: for putting into crate metadata.
+ // - instrument_coverage: for putting into coverage data (see
+ // `hash_mir_source`).
+ cfg!(debug_assertions)
+ || self.sess.opts.incremental.is_some()
+ || self.needs_metadata()
+ || self.sess.instrument_coverage()
+ }
+
+ #[inline]
pub fn stable_crate_id(self, crate_num: CrateNum) -> StableCrateId {
if crate_num == LOCAL_CRATE {
- self.sess.local_stable_crate_id()
+ self.stable_crate_id
} else {
self.cstore_untracked().stable_crate_id(crate_num)
}
@@ -811,7 +860,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// that the crate in question has already been loaded by the CrateStore.
#[inline]
pub fn stable_crate_id_to_crate_num(self, stable_crate_id: StableCrateId) -> CrateNum {
- if stable_crate_id == self.sess.local_stable_crate_id() {
+ if stable_crate_id == self.stable_crate_id(LOCAL_CRATE) {
LOCAL_CRATE
} else {
self.cstore_untracked().stable_crate_id_to_crate_num(stable_crate_id)
@@ -828,7 +877,7 @@ impl<'tcx> TyCtxt<'tcx> {
// If this is a DefPathHash from the local crate, we can look up the
// DefId in the tcx's `Definitions`.
- if stable_crate_id == self.sess.local_stable_crate_id() {
+ if stable_crate_id == self.stable_crate_id(LOCAL_CRATE) {
self.untracked.definitions.read().local_def_path_hash_to_def_id(hash, err).to_def_id()
} else {
// If this is a DefPathHash from an upstream crate, let the CrateStore map
@@ -845,7 +894,7 @@ impl<'tcx> TyCtxt<'tcx> {
// statements within the query system and we'd run into endless
// recursion otherwise.
let (crate_name, stable_crate_id) = if def_id.is_local() {
- (self.crate_name(LOCAL_CRATE), self.sess.local_stable_crate_id())
+ (self.crate_name(LOCAL_CRATE), self.stable_crate_id(LOCAL_CRATE))
} else {
let cstore = &*self.cstore_untracked();
(cstore.crate_name(def_id.krate), cstore.stable_crate_id(def_id.krate))
@@ -984,7 +1033,7 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn local_crate_exports_generics(self) -> bool {
debug_assert!(self.sess.opts.share_generics());
- self.sess.crate_types().iter().any(|crate_type| {
+ self.crate_types().iter().any(|crate_type| {
match crate_type {
CrateType::Executable
| CrateType::Staticlib
@@ -1036,7 +1085,9 @@ impl<'tcx> TyCtxt<'tcx> {
scope_def_id: LocalDefId,
) -> Vec<&'tcx hir::Ty<'tcx>> {
let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
- let Some(hir::FnDecl { output: hir::FnRetTy::Return(hir_output), .. }) = self.hir().fn_decl_by_hir_id(hir_id) else {
+ let Some(hir::FnDecl { output: hir::FnRetTy::Return(hir_output), .. }) =
+ self.hir().fn_decl_by_hir_id(hir_id)
+ else {
return vec![];
};
@@ -1058,7 +1109,7 @@ impl<'tcx> TyCtxt<'tcx> {
if let Some(hir::FnDecl { output: hir::FnRetTy::Return(hir_output), .. }) = self.hir().fn_decl_by_hir_id(hir_id)
&& let hir::TyKind::Path(hir::QPath::Resolved(
None,
- hir::Path { res: hir::def::Res::Def(DefKind::TyAlias, def_id), .. }, )) = hir_output.kind
+ hir::Path { res: hir::def::Res::Def(DefKind::TyAlias { .. }, def_id), .. }, )) = hir_output.kind
&& let Some(local_id) = def_id.as_local()
&& let Some(alias_ty) = self.hir().get_by_def_id(local_id).alias_ty() // it is type alias
&& let Some(alias_generics) = self.hir().get_by_def_id(local_id).generics()
@@ -1071,31 +1122,6 @@ impl<'tcx> TyCtxt<'tcx> {
return None;
}
- pub fn return_type_impl_trait(self, scope_def_id: LocalDefId) -> Option<(Ty<'tcx>, Span)> {
- // `type_of()` will fail on these (#55796, #86483), so only allow `fn`s or closures.
- match self.hir().get_by_def_id(scope_def_id) {
- Node::Item(&hir::Item { kind: ItemKind::Fn(..), .. }) => {}
- Node::TraitItem(&hir::TraitItem { kind: TraitItemKind::Fn(..), .. }) => {}
- Node::ImplItem(&hir::ImplItem { kind: ImplItemKind::Fn(..), .. }) => {}
- Node::Expr(&hir::Expr { kind: ExprKind::Closure { .. }, .. }) => {}
- _ => return None,
- }
-
- let ret_ty = self.type_of(scope_def_id).subst_identity();
- match ret_ty.kind() {
- ty::FnDef(_, _) => {
- let sig = ret_ty.fn_sig(self);
- let output = self.erase_late_bound_regions(sig.output());
- output.is_impl_trait().then(|| {
- let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
- let fn_decl = self.hir().fn_decl_by_hir_id(hir_id).unwrap();
- (output, fn_decl.output.span())
- })
- }
- _ => None,
- }
- }
-
/// Checks if the bound region is in Impl Item.
pub fn is_bound_region_in_impl_item(self, suitable_region_binding_scope: LocalDefId) -> bool {
let container_id = self.parent(suitable_region_binding_scope.to_def_id());
@@ -1123,7 +1149,7 @@ impl<'tcx> TyCtxt<'tcx> {
self,
self.lifetimes.re_static,
self.type_of(self.require_lang_item(LangItem::PanicLocation, None))
- .subst(self, self.mk_substs(&[self.lifetimes.re_static.into()])),
+ .instantiate(self, self.mk_args(&[self.lifetimes.re_static.into()])),
)
}
@@ -1166,7 +1192,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// A trait implemented for all `X<'a>` types that can be safely and
/// efficiently converted to `X<'tcx>` as long as they are part of the
/// provided `TyCtxt<'tcx>`.
-/// This can be done, for example, for `Ty<'tcx>` or `SubstsRef<'tcx>`
+/// This can be done, for example, for `Ty<'tcx>` or `GenericArgsRef<'tcx>`
/// by looking them up in their respective interners.
///
/// However, this is still not the best implementation as it does
@@ -1232,8 +1258,8 @@ nop_list_lift! {canonical_var_infos; CanonicalVarInfo<'a> => CanonicalVarInfo<'t
nop_list_lift! {projs; ProjectionKind => ProjectionKind}
nop_list_lift! {bound_variable_kinds; ty::BoundVariableKind => ty::BoundVariableKind}
-// This is the impl for `&'a InternalSubsts<'a>`.
-nop_list_lift! {substs; GenericArg<'a> => GenericArg<'tcx>}
+// This is the impl for `&'a GenericArgs<'a>`.
+nop_list_lift! {args; GenericArg<'a> => GenericArg<'tcx>}
CloneLiftImpls! {
Constness,
@@ -1345,7 +1371,7 @@ impl<'tcx> TyCtxt<'tcx> {
Foreign
)?;
- writeln!(fmt, "InternalSubsts interner: #{}", self.0.interners.substs.len())?;
+ writeln!(fmt, "GenericArgs interner: #{}", self.0.interners.args.len())?;
writeln!(fmt, "Region interner: #{}", self.0.interners.region.len())?;
writeln!(
fmt,
@@ -1501,7 +1527,7 @@ macro_rules! slice_interners {
// should be used when possible, because it's faster.
slice_interners!(
const_lists: pub mk_const_list(Const<'tcx>),
- substs: pub mk_substs(GenericArg<'tcx>),
+ args: pub mk_args(GenericArg<'tcx>),
type_lists: pub mk_type_list(Ty<'tcx>),
canonical_var_infos: pub mk_canonical_var_infos(CanonicalVarInfo<'tcx>),
poly_existential_predicates: intern_poly_existential_predicates(PolyExistentialPredicate<'tcx>),
@@ -1615,12 +1641,12 @@ impl<'tcx> TyCtxt<'tcx> {
}
#[inline(always)]
- pub(crate) fn check_and_mk_substs(
+ pub(crate) fn check_and_mk_args(
self,
_def_id: DefId,
- substs: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
- ) -> SubstsRef<'tcx> {
- let substs = substs.into_iter().map(Into::into);
+ args: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
+ ) -> GenericArgsRef<'tcx> {
+ let args = args.into_iter().map(Into::into);
#[cfg(debug_assertions)]
{
let generics = self.generics_of(_def_id);
@@ -1636,12 +1662,12 @@ impl<'tcx> TyCtxt<'tcx> {
};
assert_eq!(
(n, Some(n)),
- substs.size_hint(),
+ args.size_hint(),
"wrong number of generic parameters for {_def_id:?}: {:?}",
- substs.collect::<Vec<_>>(),
+ args.collect::<Vec<_>>(),
);
}
- self.mk_substs_from_iter(substs)
+ self.mk_args_from_iter(args)
}
#[inline]
@@ -1799,12 +1825,12 @@ impl<'tcx> TyCtxt<'tcx> {
T::collect_and_apply(iter, |xs| self.mk_type_list(xs))
}
- pub fn mk_substs_from_iter<I, T>(self, iter: I) -> T::Output
+ pub fn mk_args_from_iter<I, T>(self, iter: I) -> T::Output
where
I: Iterator<Item = T>,
T: CollectAndApply<GenericArg<'tcx>, &'tcx List<GenericArg<'tcx>>>,
{
- T::collect_and_apply(iter, |xs| self.mk_substs(xs))
+ T::collect_and_apply(iter, |xs| self.mk_args(xs))
}
pub fn mk_canonical_var_infos_from_iter<I, T>(self, iter: I) -> T::Output
@@ -1831,21 +1857,21 @@ impl<'tcx> TyCtxt<'tcx> {
T::collect_and_apply(iter, |xs| self.mk_fields(xs))
}
- pub fn mk_substs_trait(
+ pub fn mk_args_trait(
self,
self_ty: Ty<'tcx>,
rest: impl IntoIterator<Item = GenericArg<'tcx>>,
- ) -> SubstsRef<'tcx> {
- self.mk_substs_from_iter(iter::once(self_ty.into()).chain(rest))
+ ) -> GenericArgsRef<'tcx> {
+ self.mk_args_from_iter(iter::once(self_ty.into()).chain(rest))
}
pub fn mk_alias_ty(
self,
def_id: DefId,
- substs: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
+ args: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
) -> ty::AliasTy<'tcx> {
- let substs = self.check_and_mk_substs(def_id, substs);
- ty::AliasTy { def_id, substs, _use_mk_alias_ty_instead: () }
+ let args = self.check_and_mk_args(def_id, args);
+ ty::AliasTy { def_id, args, _use_mk_alias_ty_instead: () }
}
pub fn mk_bound_variable_kinds_from_iter<I, T>(self, iter: I) -> T::Output
@@ -1858,6 +1884,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Emit a lint at `span` from a lint struct (some type that implements `DecorateLint`,
/// typically generated by `#[derive(LintDiagnostic)]`).
+ #[track_caller]
pub fn emit_spanned_lint(
self,
lint: &'static Lint,
@@ -1878,6 +1905,7 @@ impl<'tcx> TyCtxt<'tcx> {
///
/// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
#[rustc_lint_diagnostics]
+ #[track_caller]
pub fn struct_span_lint_hir(
self,
lint: &'static Lint,
@@ -1894,6 +1922,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Emit a lint from a lint struct (some type that implements `DecorateLint`, typically
/// generated by `#[derive(LintDiagnostic)]`).
+ #[track_caller]
pub fn emit_lint(
self,
lint: &'static Lint,
@@ -1909,6 +1938,7 @@ impl<'tcx> TyCtxt<'tcx> {
///
/// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
#[rustc_lint_diagnostics]
+ #[track_caller]
pub fn struct_lint_node(
self,
lint: &'static Lint,
@@ -1948,6 +1978,84 @@ impl<'tcx> TyCtxt<'tcx> {
)
}
+ /// Given the def-id of an early-bound lifetime on an RPIT corresponding to
+ /// a duplicated captured lifetime, map it back to the early- or late-bound
+ /// lifetime of the function from which it was originally captured. If it is
+ /// a late-bound lifetime, this will represent the liberated (`ReFree`) lifetime
+ /// of the signature.
+ // FIXME(RPITIT): if we ever synthesize new lifetimes for RPITITs and not just
+ // re-use the generics of the opaque, this function will need to be tweaked slightly.
+ pub fn map_rpit_lifetime_to_fn_lifetime(
+ self,
+ mut rpit_lifetime_param_def_id: LocalDefId,
+ ) -> ty::Region<'tcx> {
+ debug_assert!(
+ matches!(self.def_kind(rpit_lifetime_param_def_id), DefKind::LifetimeParam),
+ "{rpit_lifetime_param_def_id:?} is a {}",
+ self.def_descr(rpit_lifetime_param_def_id.to_def_id())
+ );
+
+ loop {
+ let parent = self.local_parent(rpit_lifetime_param_def_id);
+ let hir::OpaqueTy { lifetime_mapping, .. } =
+ self.hir().get_by_def_id(parent).expect_item().expect_opaque_ty();
+
+ let Some((lifetime, _)) = lifetime_mapping
+ .iter()
+ .find(|(_, duplicated_param)| *duplicated_param == rpit_lifetime_param_def_id)
+ else {
+ bug!("duplicated lifetime param should be present");
+ };
+
+ match self.named_bound_var(lifetime.hir_id) {
+ Some(resolve_bound_vars::ResolvedArg::EarlyBound(ebv)) => {
+ let new_parent = self.parent(ebv);
+
+ // If we map to another opaque, then it should be a parent
+ // of the opaque we mapped from. Continue mapping.
+ if matches!(self.def_kind(new_parent), DefKind::OpaqueTy) {
+ debug_assert_eq!(self.parent(parent.to_def_id()), new_parent);
+ rpit_lifetime_param_def_id = ebv.expect_local();
+ continue;
+ }
+
+ let generics = self.generics_of(new_parent);
+ return ty::Region::new_early_bound(
+ self,
+ ty::EarlyBoundRegion {
+ def_id: ebv,
+ index: generics
+ .param_def_id_to_index(self, ebv)
+ .expect("early-bound var should be present in fn generics"),
+ name: self.hir().name(self.local_def_id_to_hir_id(ebv.expect_local())),
+ },
+ );
+ }
+ Some(resolve_bound_vars::ResolvedArg::LateBound(_, _, lbv)) => {
+ let new_parent = self.parent(lbv);
+ return ty::Region::new_free(
+ self,
+ new_parent,
+ ty::BoundRegionKind::BrNamed(
+ lbv,
+ self.hir().name(self.local_def_id_to_hir_id(lbv.expect_local())),
+ ),
+ );
+ }
+ Some(resolve_bound_vars::ResolvedArg::Error(guar)) => {
+ return ty::Region::new_error(self, guar);
+ }
+ _ => {
+ return ty::Region::new_error_with_message(
+ self,
+ lifetime.ident.span,
+ "cannot resolve lifetime",
+ );
+ }
+ }
+ }
+ }
+
/// Whether the `def_id` counts as const fn in the current crate, considering all active
/// feature gates
pub fn is_const_fn(self, def_id: DefId) -> bool {
@@ -1980,9 +2088,9 @@ impl<'tcx> TyCtxt<'tcx> {
matches!(
node,
hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
+ kind: hir::ItemKind::Impl(hir::Impl { generics, .. }),
..
- })
+ }) if generics.params.iter().any(|p| self.has_attr(p.def_id, sym::rustc_host))
)
}
@@ -2002,16 +2110,8 @@ impl<'tcx> TyCtxt<'tcx> {
)
}
- pub fn lower_impl_trait_in_trait_to_assoc_ty(self) -> bool {
- self.sess.opts.unstable_opts.lower_impl_trait_in_trait_to_assoc_ty
- }
-
pub fn is_impl_trait_in_trait(self, def_id: DefId) -> bool {
- if self.lower_impl_trait_in_trait_to_assoc_ty() {
- self.opt_rpitit_info(def_id).is_some()
- } else {
- self.def_kind(def_id) == DefKind::ImplTraitPlaceholder
- }
+ self.opt_rpitit_info(def_id).is_some()
}
/// Named module children from all kinds of items, including imports.
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
index a0b17c374..5db9b775a 100644
--- a/compiler/rustc_middle/src/ty/diagnostics.rs
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -1,6 +1,7 @@
//! Diagnostics related methods for `Ty`.
use std::borrow::Cow;
+use std::fmt::Write;
use std::ops::ControlFlow;
use crate::ty::{
@@ -71,7 +72,7 @@ impl<'tcx> Ty<'tcx> {
/// ADTs with no type arguments.
pub fn is_simple_text(self) -> bool {
match self.kind() {
- Adt(_, substs) => substs.non_erasable_generics().next().is_none(),
+ Adt(_, args) => args.non_erasable_generics().next().is_none(),
Ref(_, ty, _) => ty.is_simple_text(),
_ => self.is_simple_ty(),
}
@@ -126,7 +127,7 @@ pub fn suggest_arbitrary_trait_bound<'tcx>(
if constraint.ends_with('>') {
constraint = format!("{}, {} = {}>", &constraint[..constraint.len() - 1], name, term);
} else {
- constraint.push_str(&format!("<{} = {}>", name, term));
+ constraint.push_str(&format!("<{name} = {term}>"));
}
}
@@ -274,9 +275,9 @@ pub fn suggest_constraining_type_params<'a>(
if span_to_replace.is_some() {
constraint.clone()
} else if bound_list_non_empty {
- format!(" + {}", constraint)
+ format!(" + {constraint}")
} else {
- format!(" {}", constraint)
+ format!(" {constraint}")
},
SuggestChangingConstraintsMessage::RestrictBoundFurther,
))
@@ -335,10 +336,10 @@ pub fn suggest_constraining_type_params<'a>(
// - insert: `, X: Bar`
suggestions.push((
generics.tail_span_for_predicate_suggestion(),
- constraints
- .iter()
- .map(|&(constraint, _)| format!(", {}: {}", param_name, constraint))
- .collect::<String>(),
+ constraints.iter().fold(String::new(), |mut string, &(constraint, _)| {
+ write!(string, ", {param_name}: {constraint}").unwrap();
+ string
+ }),
SuggestChangingConstraintsMessage::RestrictTypeFurther { ty: param_name },
));
continue;
@@ -358,7 +359,7 @@ pub fn suggest_constraining_type_params<'a>(
// default (`<T=Foo>`), so we suggest adding `where T: Bar`.
suggestions.push((
generics.tail_span_for_predicate_suggestion(),
- format!(" where {}: {}", param_name, constraint),
+ format!(" where {param_name}: {constraint}"),
SuggestChangingConstraintsMessage::RestrictTypeFurther { ty: param_name },
));
continue;
@@ -371,7 +372,7 @@ pub fn suggest_constraining_type_params<'a>(
if let Some(colon_span) = param.colon_span {
suggestions.push((
colon_span.shrink_to_hi(),
- format!(" {}", constraint),
+ format!(" {constraint}"),
SuggestChangingConstraintsMessage::RestrictType { ty: param_name },
));
continue;
@@ -383,7 +384,7 @@ pub fn suggest_constraining_type_params<'a>(
// - help: consider restricting this type parameter with `T: Foo`
suggestions.push((
param.span.shrink_to_hi(),
- format!(": {}", constraint),
+ format!(": {constraint}"),
SuggestChangingConstraintsMessage::RestrictType { ty: param_name },
));
}
@@ -401,10 +402,10 @@ pub fn suggest_constraining_type_params<'a>(
Cow::from("consider further restricting this bound")
}
SuggestChangingConstraintsMessage::RestrictType { ty } => {
- Cow::from(format!("consider restricting type parameter `{}`", ty))
+ Cow::from(format!("consider restricting type parameter `{ty}`"))
}
SuggestChangingConstraintsMessage::RestrictTypeFurther { ty } => {
- Cow::from(format!("consider further restricting type parameter `{}`", ty))
+ Cow::from(format!("consider further restricting type parameter `{ty}`"))
}
SuggestChangingConstraintsMessage::RemoveMaybeUnsized => {
Cow::from("consider removing the `?Sized` bound to make the type parameter `Sized`")
@@ -491,8 +492,8 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for IsSuggestableVisitor<'tcx> {
Alias(Opaque, AliasTy { def_id, .. }) => {
let parent = self.tcx.parent(def_id);
- let parent_ty = self.tcx.type_of(parent).subst_identity();
- if let DefKind::TyAlias | DefKind::AssocTy = self.tcx.def_kind(parent)
+ let parent_ty = self.tcx.type_of(parent).instantiate_identity();
+ if let DefKind::TyAlias { .. } | DefKind::AssocTy = self.tcx.def_kind(parent)
&& let Alias(Opaque, AliasTy { def_id: parent_opaque_def_id, .. }) = *parent_ty.kind()
&& parent_opaque_def_id == def_id
{
@@ -558,8 +559,8 @@ impl<'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for MakeSuggestableFolder<'tcx> {
let t = match *t.kind() {
Infer(InferTy::TyVar(_)) if self.infer_suggestable => t,
- FnDef(def_id, substs) => {
- Ty::new_fn_ptr(self.tcx, self.tcx.fn_sig(def_id).subst(self.tcx, substs))
+ FnDef(def_id, args) => {
+ Ty::new_fn_ptr(self.tcx, self.tcx.fn_sig(def_id).instantiate(self.tcx, args))
}
// FIXME(compiler-errors): We could replace these with infer, I guess.
@@ -575,8 +576,8 @@ impl<'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for MakeSuggestableFolder<'tcx> {
Alias(Opaque, AliasTy { def_id, .. }) => {
let parent = self.tcx.parent(def_id);
- let parent_ty = self.tcx.type_of(parent).subst_identity();
- if let hir::def::DefKind::TyAlias | hir::def::DefKind::AssocTy = self.tcx.def_kind(parent)
+ let parent_ty = self.tcx.type_of(parent).instantiate_identity();
+ if let hir::def::DefKind::TyAlias { .. } | hir::def::DefKind::AssocTy = self.tcx.def_kind(parent)
&& let Alias(Opaque, AliasTy { def_id: parent_opaque_def_id, .. }) = *parent_ty.kind()
&& parent_opaque_def_id == def_id
{
diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs
index c794c3fad..bf6f082c2 100644
--- a/compiler/rustc_middle/src/ty/error.rs
+++ b/compiler/rustc_middle/src/ty/error.rs
@@ -90,9 +90,9 @@ impl<'tcx> TypeError<'tcx> {
// A naive approach to making sure that we're not reporting silly errors such as:
// (expected closure, found closure).
if expected == found {
- format!("expected {}, found a different {}", expected, found)
+ format!("expected {expected}, found a different {found}")
} else {
- format!("expected {}, found {}", expected, found)
+ format!("expected {expected}, found {found}")
}
}
@@ -131,7 +131,7 @@ impl<'tcx> TypeError<'tcx> {
)
.into(),
ArgCount => "incorrect number of function parameters".into(),
- FieldMisMatch(adt, field) => format!("field type mismatch: {}.{}", adt, field).into(),
+ FieldMisMatch(adt, field) => format!("field type mismatch: {adt}.{field}").into(),
RegionsDoesNotOutlive(..) => "lifetime mismatch".into(),
// Actually naming the region here is a bit confusing because context is lacking
RegionsInsufficientlyPolymorphic(..) => {
@@ -164,7 +164,7 @@ impl<'tcx> TypeError<'tcx> {
ty::IntVarValue::IntType(ty) => ty.name_str(),
ty::IntVarValue::UintType(ty) => ty.name_str(),
};
- format!("expected `{}`, found `{}`", expected, found).into()
+ format!("expected `{expected}`, found `{found}`").into()
}
FloatMismatch(ref values) => format!(
"expected `{}`, found `{}`",
@@ -339,12 +339,17 @@ impl<'tcx> TyCtxt<'tcx> {
}
pub fn short_ty_string(self, ty: Ty<'tcx>) -> (String, Option<PathBuf>) {
- let width = self.sess.diagnostic_width();
- let length_limit = width.saturating_sub(30);
let regular = FmtPrinter::new(self, hir::def::Namespace::TypeNS)
.pretty_print_type(ty)
.expect("could not write to `String`")
.into_buffer();
+
+ if !self.sess.opts.unstable_opts.write_long_types_to_disk {
+ return (regular, None);
+ }
+
+ let width = self.sess.diagnostic_width();
+ let length_limit = width.saturating_sub(30);
if regular.len() <= width {
return (regular, None);
}
diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs
index 76f61d9ac..668aa4521 100644
--- a/compiler/rustc_middle/src/ty/fast_reject.rs
+++ b/compiler/rustc_middle/src/ty/fast_reject.rs
@@ -1,40 +1,38 @@
use crate::mir::Mutability;
-use crate::ty::subst::GenericArgKind;
-use crate::ty::{self, SubstsRef, Ty, TyCtxt, TypeVisitableExt};
+use crate::ty::GenericArgKind;
+use crate::ty::{self, GenericArgsRef, Ty, TyCtxt, TypeVisitableExt};
use rustc_hir::def_id::DefId;
use std::fmt::Debug;
use std::hash::Hash;
use std::iter;
-use self::SimplifiedType::*;
-
/// See `simplify_type`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
pub enum SimplifiedType {
- BoolSimplifiedType,
- CharSimplifiedType,
- IntSimplifiedType(ty::IntTy),
- UintSimplifiedType(ty::UintTy),
- FloatSimplifiedType(ty::FloatTy),
- AdtSimplifiedType(DefId),
- ForeignSimplifiedType(DefId),
- StrSimplifiedType,
- ArraySimplifiedType,
- SliceSimplifiedType,
- RefSimplifiedType(Mutability),
- PtrSimplifiedType(Mutability),
- NeverSimplifiedType,
- TupleSimplifiedType(usize),
+ Bool,
+ Char,
+ Int(ty::IntTy),
+ Uint(ty::UintTy),
+ Float(ty::FloatTy),
+ Adt(DefId),
+ Foreign(DefId),
+ Str,
+ Array,
+ Slice,
+ Ref(Mutability),
+ Ptr(Mutability),
+ Never,
+ Tuple(usize),
/// A trait object, all of whose components are markers
/// (e.g., `dyn Send + Sync`).
- MarkerTraitObjectSimplifiedType,
- TraitSimplifiedType(DefId),
- ClosureSimplifiedType(DefId),
- GeneratorSimplifiedType(DefId),
- GeneratorWitnessSimplifiedType(usize),
- GeneratorWitnessMIRSimplifiedType(DefId),
- FunctionSimplifiedType(usize),
- PlaceholderSimplifiedType,
+ MarkerTraitObject,
+ Trait(DefId),
+ Closure(DefId),
+ Generator(DefId),
+ GeneratorWitness(usize),
+ GeneratorWitnessMIR(DefId),
+ Function(usize),
+ Placeholder,
}
/// Generic parameters are pretty much just bound variables, e.g.
@@ -64,6 +62,9 @@ pub enum TreatParams {
/// correct mode for *lookup*, as during candidate selection.
///
/// N.B. during deep rejection, this acts identically to `ForLookup`.
+ ///
+ /// FIXME(-Ztrait-solver=next): Remove this variant and clean up
+ /// the code.
NextSolverLookup,
}
@@ -110,34 +111,36 @@ pub fn simplify_type<'tcx>(
treat_params: TreatParams,
) -> Option<SimplifiedType> {
match *ty.kind() {
- ty::Bool => Some(BoolSimplifiedType),
- ty::Char => Some(CharSimplifiedType),
- ty::Int(int_type) => Some(IntSimplifiedType(int_type)),
- ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)),
- ty::Float(float_type) => Some(FloatSimplifiedType(float_type)),
- ty::Adt(def, _) => Some(AdtSimplifiedType(def.did())),
- ty::Str => Some(StrSimplifiedType),
- ty::Array(..) => Some(ArraySimplifiedType),
- ty::Slice(..) => Some(SliceSimplifiedType),
- ty::RawPtr(ptr) => Some(PtrSimplifiedType(ptr.mutbl)),
+ ty::Bool => Some(SimplifiedType::Bool),
+ ty::Char => Some(SimplifiedType::Char),
+ ty::Int(int_type) => Some(SimplifiedType::Int(int_type)),
+ ty::Uint(uint_type) => Some(SimplifiedType::Uint(uint_type)),
+ ty::Float(float_type) => Some(SimplifiedType::Float(float_type)),
+ ty::Adt(def, _) => Some(SimplifiedType::Adt(def.did())),
+ ty::Str => Some(SimplifiedType::Str),
+ ty::Array(..) => Some(SimplifiedType::Array),
+ ty::Slice(..) => Some(SimplifiedType::Slice),
+ ty::RawPtr(ptr) => Some(SimplifiedType::Ptr(ptr.mutbl)),
ty::Dynamic(trait_info, ..) => match trait_info.principal_def_id() {
Some(principal_def_id) if !tcx.trait_is_auto(principal_def_id) => {
- Some(TraitSimplifiedType(principal_def_id))
+ Some(SimplifiedType::Trait(principal_def_id))
}
- _ => Some(MarkerTraitObjectSimplifiedType),
+ _ => Some(SimplifiedType::MarkerTraitObject),
},
- ty::Ref(_, _, mutbl) => Some(RefSimplifiedType(mutbl)),
- ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(ClosureSimplifiedType(def_id)),
- ty::Generator(def_id, _, _) => Some(GeneratorSimplifiedType(def_id)),
- ty::GeneratorWitness(tys) => Some(GeneratorWitnessSimplifiedType(tys.skip_binder().len())),
- ty::GeneratorWitnessMIR(def_id, _) => Some(GeneratorWitnessMIRSimplifiedType(def_id)),
- ty::Never => Some(NeverSimplifiedType),
- ty::Tuple(tys) => Some(TupleSimplifiedType(tys.len())),
- ty::FnPtr(f) => Some(FunctionSimplifiedType(f.skip_binder().inputs().len())),
- ty::Placeholder(..) => Some(PlaceholderSimplifiedType),
+ ty::Ref(_, _, mutbl) => Some(SimplifiedType::Ref(mutbl)),
+ ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(SimplifiedType::Closure(def_id)),
+ ty::Generator(def_id, _, _) => Some(SimplifiedType::Generator(def_id)),
+ ty::GeneratorWitness(tys) => {
+ Some(SimplifiedType::GeneratorWitness(tys.skip_binder().len()))
+ }
+ ty::GeneratorWitnessMIR(def_id, _) => Some(SimplifiedType::GeneratorWitnessMIR(def_id)),
+ ty::Never => Some(SimplifiedType::Never),
+ ty::Tuple(tys) => Some(SimplifiedType::Tuple(tys.len())),
+ ty::FnPtr(f) => Some(SimplifiedType::Function(f.skip_binder().inputs().len())),
+ ty::Placeholder(..) => Some(SimplifiedType::Placeholder),
ty::Param(_) => match treat_params {
TreatParams::ForLookup | TreatParams::NextSolverLookup => {
- Some(PlaceholderSimplifiedType)
+ Some(SimplifiedType::Placeholder)
}
TreatParams::AsCandidateKey => None,
},
@@ -147,11 +150,13 @@ pub fn simplify_type<'tcx>(
//
// We will have to be careful with lazy normalization here.
// FIXME(lazy_normalization): This is probably not right...
- TreatParams::ForLookup if !ty.has_non_region_infer() => Some(PlaceholderSimplifiedType),
- TreatParams::NextSolverLookup => Some(PlaceholderSimplifiedType),
+ TreatParams::ForLookup if !ty.has_non_region_infer() => {
+ Some(SimplifiedType::Placeholder)
+ }
+ TreatParams::NextSolverLookup => Some(SimplifiedType::Placeholder),
TreatParams::ForLookup | TreatParams::AsCandidateKey => None,
},
- ty::Foreign(def_id) => Some(ForeignSimplifiedType(def_id)),
+ ty::Foreign(def_id) => Some(SimplifiedType::Foreign(def_id)),
ty::Bound(..) | ty::Infer(_) | ty::Error(_) => None,
}
}
@@ -159,12 +164,12 @@ pub fn simplify_type<'tcx>(
impl SimplifiedType {
pub fn def(self) -> Option<DefId> {
match self {
- AdtSimplifiedType(d)
- | ForeignSimplifiedType(d)
- | TraitSimplifiedType(d)
- | ClosureSimplifiedType(d)
- | GeneratorSimplifiedType(d)
- | GeneratorWitnessMIRSimplifiedType(d) => Some(d),
+ SimplifiedType::Adt(d)
+ | SimplifiedType::Foreign(d)
+ | SimplifiedType::Trait(d)
+ | SimplifiedType::Closure(d)
+ | SimplifiedType::Generator(d)
+ | SimplifiedType::GeneratorWitnessMIR(d) => Some(d),
_ => None,
}
}
@@ -188,12 +193,12 @@ pub struct DeepRejectCtxt {
}
impl DeepRejectCtxt {
- pub fn substs_refs_may_unify<'tcx>(
+ pub fn args_refs_may_unify<'tcx>(
self,
- obligation_substs: SubstsRef<'tcx>,
- impl_substs: SubstsRef<'tcx>,
+ obligation_args: GenericArgsRef<'tcx>,
+ impl_args: GenericArgsRef<'tcx>,
) -> bool {
- iter::zip(obligation_substs, impl_substs).all(|(obl, imp)| {
+ iter::zip(obligation_args, impl_args).all(|(obl, imp)| {
match (obl.unpack(), imp.unpack()) {
// We don't fast reject based on regions for now.
(GenericArgKind::Lifetime(_), GenericArgKind::Lifetime(_)) => true,
@@ -258,9 +263,9 @@ impl DeepRejectCtxt {
}
_ => false,
},
- ty::Adt(obl_def, obl_substs) => match k {
- &ty::Adt(impl_def, impl_substs) => {
- obl_def == impl_def && self.substs_refs_may_unify(obl_substs, impl_substs)
+ ty::Adt(obl_def, obl_args) => match k {
+ &ty::Adt(impl_def, impl_args) => {
+ obl_def == impl_def && self.args_refs_may_unify(obl_args, impl_args)
}
_ => false,
},
diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs
index ff3917947..bbd4a6233 100644
--- a/compiler/rustc_middle/src/ty/flags.rs
+++ b/compiler/rustc_middle/src/ty/flags.rs
@@ -1,5 +1,5 @@
-use crate::ty::subst::{GenericArg, GenericArgKind};
use crate::ty::{self, InferConst, Ty, TypeFlags};
+use crate::ty::{GenericArg, GenericArgKind};
use std::slice;
#[derive(Debug)]
@@ -105,48 +105,48 @@ impl FlagComputation {
self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
}
- ty::Generator(_, substs, _) => {
- let substs = substs.as_generator();
+ ty::Generator(_, args, _) => {
+ let args = args.as_generator();
let should_remove_further_specializable =
!self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
- self.add_substs(substs.parent_substs());
+ self.add_args(args.parent_args());
if should_remove_further_specializable {
self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE;
}
- self.add_ty(substs.resume_ty());
- self.add_ty(substs.return_ty());
- self.add_ty(substs.witness());
- self.add_ty(substs.yield_ty());
- self.add_ty(substs.tupled_upvars_ty());
+ self.add_ty(args.resume_ty());
+ self.add_ty(args.return_ty());
+ self.add_ty(args.witness());
+ self.add_ty(args.yield_ty());
+ self.add_ty(args.tupled_upvars_ty());
}
&ty::GeneratorWitness(ts) => {
self.bound_computation(ts, |flags, ts| flags.add_tys(ts));
}
- ty::GeneratorWitnessMIR(_, substs) => {
+ ty::GeneratorWitnessMIR(_, args) => {
let should_remove_further_specializable =
!self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
- self.add_substs(substs);
+ self.add_args(args);
if should_remove_further_specializable {
self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE;
}
self.add_flags(TypeFlags::HAS_TY_GENERATOR);
}
- &ty::Closure(_, substs) => {
- let substs = substs.as_closure();
+ &ty::Closure(_, args) => {
+ let args = args.as_closure();
let should_remove_further_specializable =
!self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
- self.add_substs(substs.parent_substs());
+ self.add_args(args.parent_args());
if should_remove_further_specializable {
self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE;
}
- self.add_ty(substs.sig_as_fn_ptr_ty());
- self.add_ty(substs.kind_ty());
- self.add_ty(substs.tupled_upvars_ty());
+ self.add_ty(args.sig_as_fn_ptr_ty());
+ self.add_ty(args.kind_ty());
+ self.add_ty(args.tupled_upvars_ty());
}
&ty::Bound(debruijn, _) => {
@@ -172,8 +172,8 @@ impl FlagComputation {
}
}
- &ty::Adt(_, substs) => {
- self.add_substs(substs);
+ &ty::Adt(_, args) => {
+ self.add_args(args);
}
&ty::Alias(kind, data) => {
@@ -189,7 +189,7 @@ impl FlagComputation {
&ty::Dynamic(obj, r, _) => {
for predicate in obj.iter() {
self.bound_computation(predicate, |computation, predicate| match predicate {
- ty::ExistentialPredicate::Trait(tr) => computation.add_substs(tr.substs),
+ ty::ExistentialPredicate::Trait(tr) => computation.add_args(tr.args),
ty::ExistentialPredicate::Projection(p) => {
computation.add_existential_projection(&p);
}
@@ -220,8 +220,8 @@ impl FlagComputation {
self.add_tys(types);
}
- &ty::FnDef(_, substs) => {
- self.add_substs(substs);
+ &ty::FnDef(_, args) => {
+ self.add_args(args);
}
&ty::FnPtr(fn_sig) => self.bound_computation(fn_sig, |computation, fn_sig| {
@@ -238,7 +238,7 @@ impl FlagComputation {
fn add_predicate_atom(&mut self, atom: ty::PredicateKind<'_>) {
match atom {
ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_pred)) => {
- self.add_substs(trait_pred.trait_ref.substs);
+ self.add_args(trait_pred.trait_ref.args);
}
ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate(
a,
@@ -274,11 +274,11 @@ impl FlagComputation {
self.add_term(term);
}
ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(arg)) => {
- self.add_substs(slice::from_ref(&arg));
+ self.add_args(slice::from_ref(&arg));
}
ty::PredicateKind::ObjectSafe(_def_id) => {}
- ty::PredicateKind::ClosureKind(_def_id, substs, _kind) => {
- self.add_substs(substs);
+ ty::PredicateKind::ClosureKind(_def_id, args, _kind) => {
+ self.add_args(args);
}
ty::PredicateKind::Clause(ty::ClauseKind::ConstEvaluatable(uv)) => {
self.add_const(uv);
@@ -317,7 +317,7 @@ impl FlagComputation {
self.add_ty(c.ty());
match c.kind() {
ty::ConstKind::Unevaluated(uv) => {
- self.add_substs(uv.substs);
+ self.add_args(uv.args);
self.add_flags(TypeFlags::HAS_CT_PROJECTION);
}
ty::ConstKind::Infer(infer) => {
@@ -365,7 +365,7 @@ impl FlagComputation {
}
fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection<'_>) {
- self.add_substs(projection.substs);
+ self.add_args(projection.args);
match projection.term.unpack() {
ty::TermKind::Ty(ty) => self.add_ty(ty),
ty::TermKind::Const(ct) => self.add_const(ct),
@@ -373,11 +373,11 @@ impl FlagComputation {
}
fn add_alias_ty(&mut self, alias_ty: ty::AliasTy<'_>) {
- self.add_substs(alias_ty.substs);
+ self.add_args(alias_ty.args);
}
- fn add_substs(&mut self, substs: &[GenericArg<'_>]) {
- for kind in substs {
+ fn add_args(&mut self, args: &[GenericArg<'_>]) {
+ for kind in args {
match kind.unpack() {
GenericArgKind::Type(ty) => self.add_ty(ty),
GenericArgKind::Lifetime(lt) => self.add_region(lt),
diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/generic_args.rs
index 4d5f5b865..97dab5cb4 100644
--- a/compiler/rustc_middle/src/ty/subst.rs
+++ b/compiler/rustc_middle/src/ty/generic_args.rs
@@ -1,8 +1,8 @@
-// Type substitutions.
+// Generic arguments.
use crate::ty::codec::{TyDecoder, TyEncoder};
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable};
-use crate::ty::sty::{ClosureSubsts, GeneratorSubsts, InlineConstSubsts};
+use crate::ty::sty::{ClosureArgs, GeneratorArgs, InlineConstArgs};
use crate::ty::visit::{TypeVisitable, TypeVisitableExt, TypeVisitor};
use crate::ty::{self, Lift, List, ParamConst, Ty, TyCtxt};
@@ -17,7 +17,6 @@ use smallvec::SmallVec;
use core::intrinsics;
use std::cmp::Ordering;
-use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::num::NonZeroUsize;
@@ -80,16 +79,6 @@ impl<'tcx> GenericArgKind<'tcx> {
}
}
-impl<'tcx> fmt::Debug for GenericArg<'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self.unpack() {
- GenericArgKind::Lifetime(lt) => lt.fmt(f),
- GenericArgKind::Type(ty) => ty.fmt(f),
- GenericArgKind::Const(ct) => ct.fmt(f),
- }
- }
-}
-
impl<'tcx> Ord for GenericArg<'tcx> {
fn cmp(&self, other: &GenericArg<'tcx>) -> Ordering {
self.unpack().cmp(&other.unpack())
@@ -185,7 +174,7 @@ impl<'tcx> GenericArg<'tcx> {
}
/// Unpack the `GenericArg` as a type when it is known certainly to be a type.
- /// This is true in cases where `Substs` is used in places where the kinds are known
+ /// This is true in cases where `GenericArgs` is used in places where the kinds are known
/// to be limited (e.g. in tuples, where the only parameters are type parameters).
pub fn expect_ty(self) -> Ty<'tcx> {
self.as_type().unwrap_or_else(|| bug!("expected a type, but found another kind"))
@@ -252,13 +241,13 @@ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for GenericArg<'tcx> {
}
}
-/// List of generic arguments that are gonna be used to substitute generic parameters.
-pub type InternalSubsts<'tcx> = List<GenericArg<'tcx>>;
+/// List of generic arguments that are gonna be used to replace generic parameters.
+pub type GenericArgs<'tcx> = List<GenericArg<'tcx>>;
-pub type SubstsRef<'tcx> = &'tcx InternalSubsts<'tcx>;
+pub type GenericArgsRef<'tcx> = &'tcx GenericArgs<'tcx>;
-impl<'tcx> InternalSubsts<'tcx> {
- /// Converts substs to a type list.
+impl<'tcx> GenericArgs<'tcx> {
+ /// Converts generic args to a type list.
///
/// # Panics
///
@@ -266,66 +255,71 @@ impl<'tcx> InternalSubsts<'tcx> {
pub fn into_type_list(&self, tcx: TyCtxt<'tcx>) -> &'tcx List<Ty<'tcx>> {
tcx.mk_type_list_from_iter(self.iter().map(|arg| match arg.unpack() {
GenericArgKind::Type(ty) => ty,
- _ => bug!("`into_type_list` called on substs with non-types"),
+ _ => bug!("`into_type_list` called on generic arg with non-types"),
}))
}
- /// Interpret these substitutions as the substitutions of a closure type.
- /// Closure substitutions have a particular structure controlled by the
+ /// Interpret these generic args as the args of a closure type.
+ /// Closure args have a particular structure controlled by the
/// compiler that encodes information like the signature and closure kind;
- /// see `ty::ClosureSubsts` struct for more comments.
- pub fn as_closure(&'tcx self) -> ClosureSubsts<'tcx> {
- ClosureSubsts { substs: self }
+ /// see `ty::ClosureArgs` struct for more comments.
+ pub fn as_closure(&'tcx self) -> ClosureArgs<'tcx> {
+ ClosureArgs { args: self }
}
- /// Interpret these substitutions as the substitutions of a generator type.
- /// Generator substitutions have a particular structure controlled by the
+ /// Interpret these generic args as the args of a generator type.
+ /// Generator args have a particular structure controlled by the
/// compiler that encodes information like the signature and generator kind;
- /// see `ty::GeneratorSubsts` struct for more comments.
- pub fn as_generator(&'tcx self) -> GeneratorSubsts<'tcx> {
- GeneratorSubsts { substs: self }
+ /// see `ty::GeneratorArgs` struct for more comments.
+ pub fn as_generator(&'tcx self) -> GeneratorArgs<'tcx> {
+ GeneratorArgs { args: self }
}
- /// Interpret these substitutions as the substitutions of an inline const.
- /// Inline const substitutions have a particular structure controlled by the
+ /// Interpret these generic args as the args of an inline const.
+ /// Inline const args have a particular structure controlled by the
/// compiler that encodes information like the inferred type;
- /// see `ty::InlineConstSubsts` struct for more comments.
- pub fn as_inline_const(&'tcx self) -> InlineConstSubsts<'tcx> {
- InlineConstSubsts { substs: self }
+ /// see `ty::InlineConstArgs` struct for more comments.
+ pub fn as_inline_const(&'tcx self) -> InlineConstArgs<'tcx> {
+ InlineConstArgs { args: self }
}
- /// Creates an `InternalSubsts` that maps each generic parameter to itself.
- pub fn identity_for_item(tcx: TyCtxt<'tcx>, def_id: impl Into<DefId>) -> SubstsRef<'tcx> {
+ /// Creates a `GenericArgs` that maps each generic parameter to itself.
+ pub fn identity_for_item(tcx: TyCtxt<'tcx>, def_id: impl Into<DefId>) -> GenericArgsRef<'tcx> {
Self::for_item(tcx, def_id.into(), |param, _| tcx.mk_param_from_def(param))
}
- /// Creates an `InternalSubsts` for generic parameter definitions,
+ /// Creates a `GenericArgs` for generic parameter definitions,
/// by calling closures to obtain each kind.
- /// The closures get to observe the `InternalSubsts` as they're
+ /// The closures get to observe the `GenericArgs` as they're
/// being built, which can be used to correctly
- /// substitute defaults of generic parameters.
- pub fn for_item<F>(tcx: TyCtxt<'tcx>, def_id: DefId, mut mk_kind: F) -> SubstsRef<'tcx>
+ /// replace defaults of generic parameters.
+ pub fn for_item<F>(tcx: TyCtxt<'tcx>, def_id: DefId, mut mk_kind: F) -> GenericArgsRef<'tcx>
where
F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
{
let defs = tcx.generics_of(def_id);
let count = defs.count();
- let mut substs = SmallVec::with_capacity(count);
- Self::fill_item(&mut substs, tcx, defs, &mut mk_kind);
- tcx.mk_substs(&substs)
+ let mut args = SmallVec::with_capacity(count);
+ Self::fill_item(&mut args, tcx, defs, &mut mk_kind);
+ tcx.mk_args(&args)
}
- pub fn extend_to<F>(&self, tcx: TyCtxt<'tcx>, def_id: DefId, mut mk_kind: F) -> SubstsRef<'tcx>
+ pub fn extend_to<F>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ mut mk_kind: F,
+ ) -> GenericArgsRef<'tcx>
where
F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
{
- Self::for_item(tcx, def_id, |param, substs| {
- self.get(param.index as usize).cloned().unwrap_or_else(|| mk_kind(param, substs))
+ Self::for_item(tcx, def_id, |param, args| {
+ self.get(param.index as usize).cloned().unwrap_or_else(|| mk_kind(param, args))
})
}
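The `extend_to` helper above reuses any argument already present in `self` (looked up by parameter index) and asks the caller's closure only for the missing tail. A minimal standalone sketch of that behaviour, using plain `String`s instead of `GenericArg`s (the function and names are invented for illustration, not part of this change):

```rust
// `existing` plays the role of `self`, `mk` the role of `mk_kind`.
fn extend_to(existing: &[String], total: usize, mut mk: impl FnMut(usize) -> String) -> Vec<String> {
    (0..total)
        .map(|i| existing.get(i).cloned().unwrap_or_else(|| mk(i)))
        .collect()
}

fn main() {
    let got = extend_to(&["u32".to_string()], 3, |i| format!("param{i}"));
    assert_eq!(got, vec!["u32", "param1", "param2"]);
}
```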
pub fn fill_item<F>(
- substs: &mut SmallVec<[GenericArg<'tcx>; 8]>,
+ args: &mut SmallVec<[GenericArg<'tcx>; 8]>,
tcx: TyCtxt<'tcx>,
defs: &ty::Generics,
mk_kind: &mut F,
@@ -334,38 +328,38 @@ impl<'tcx> InternalSubsts<'tcx> {
{
if let Some(def_id) = defs.parent {
let parent_defs = tcx.generics_of(def_id);
- Self::fill_item(substs, tcx, parent_defs, mk_kind);
+ Self::fill_item(args, tcx, parent_defs, mk_kind);
}
- Self::fill_single(substs, defs, mk_kind)
+ Self::fill_single(args, defs, mk_kind)
}
pub fn fill_single<F>(
- substs: &mut SmallVec<[GenericArg<'tcx>; 8]>,
+ args: &mut SmallVec<[GenericArg<'tcx>; 8]>,
defs: &ty::Generics,
mk_kind: &mut F,
) where
F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
{
- substs.reserve(defs.params.len());
+ args.reserve(defs.params.len());
for param in &defs.params {
- let kind = mk_kind(param, substs);
- assert_eq!(param.index as usize, substs.len(), "{substs:#?}, {defs:#?}");
- substs.push(kind);
+ let kind = mk_kind(param, args);
+ assert_eq!(param.index as usize, args.len(), "{args:#?}, {defs:#?}");
+ args.push(kind);
}
}
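The recursion in `fill_item`/`fill_single` above pushes parent parameters before the item's own, which is why `param.index` can be asserted to equal the current length of the list. A self-contained toy version of that invariant (types and names invented; rustc's `Generics` carries far more information):

```rust
struct Generics {
    parent: Option<Box<Generics>>,
    params: Vec<(usize, &'static str)>, // (index, name)
}

fn fill_item<F>(args: &mut Vec<String>, defs: &Generics, mk: &mut F)
where
    F: FnMut(usize, &str, &[String]) -> String,
{
    if let Some(parent) = &defs.parent {
        fill_item(args, parent, mk); // parent parameters come first
    }
    for &(index, name) in &defs.params {
        let arg = mk(index, name, args);
        assert_eq!(index, args.len()); // index == position in the list
        args.push(arg);
    }
}

fn main() {
    // `trait X<S> { fn f<T>(); }`: the trait contributes [Self, S], `f` adds T.
    let trait_generics = Generics { parent: None, params: vec![(0, "Self"), (1, "S")] };
    let fn_generics = Generics { parent: Some(Box::new(trait_generics)), params: vec![(2, "T")] };

    let mut args = Vec::new();
    // The identity mapping used by `identity_for_item`: each parameter maps to itself.
    fill_item(&mut args, &fn_generics, &mut |_: usize, name: &str, _: &[String]| name.to_string());
    assert_eq!(args, vec!["Self", "S", "T"]);
}
```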
- // Extend an `original_substs` list to the full number of substs expected by `def_id`,
+ // Extend an `original_args` list to the full number of args expected by `def_id`,
// filling in the missing parameters with error ty/ct or 'static regions.
pub fn extend_with_error(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- original_substs: &[GenericArg<'tcx>],
- ) -> SubstsRef<'tcx> {
- ty::InternalSubsts::for_item(tcx, def_id, |def, substs| {
- if let Some(subst) = original_substs.get(def.index as usize) {
- *subst
+ original_args: &[GenericArg<'tcx>],
+ ) -> GenericArgsRef<'tcx> {
+ ty::GenericArgs::for_item(tcx, def_id, |def, args| {
+ if let Some(arg) = original_args.get(def.index as usize) {
+ *arg
} else {
- def.to_error(tcx, substs)
+ def.to_error(tcx, args)
}
})
}
@@ -421,9 +415,9 @@ impl<'tcx> InternalSubsts<'tcx> {
self.type_at(def.index as usize).into()
}
- /// Transform from substitutions for a child of `source_ancestor`
- /// (e.g., a trait or impl) to substitutions for the same child
- /// in a different item, with `target_substs` as the base for
+ /// Transform from generic args for a child of `source_ancestor`
+ /// (e.g., a trait or impl) to args for the same child
+ /// in a different item, with `target_args` as the base for
/// the target impl/trait, with the source child-specific
/// parameters (e.g., method parameters) on top of that base.
///
@@ -434,23 +428,23 @@ impl<'tcx> InternalSubsts<'tcx> {
/// impl<U> X<U> for U { fn f<V>() {} }
/// ```
///
- /// * If `self` is `[Self, S, T]`: the identity substs of `f` in the trait.
+ /// * If `self` is `[Self, S, T]`: the identity args of `f` in the trait.
/// * If `source_ancestor` is the def_id of the trait.
- /// * If `target_substs` is `[U]`, the substs for the impl.
- /// * Then we will return `[U, T]`, the subst for `f` in the impl that
+ /// * If `target_args` is `[U]`, the args for the impl.
+ /// * Then we will return `[U, T]`, the args for `f` in the impl that
/// are needed for it to match the trait.
pub fn rebase_onto(
&self,
tcx: TyCtxt<'tcx>,
source_ancestor: DefId,
- target_substs: SubstsRef<'tcx>,
- ) -> SubstsRef<'tcx> {
+ target_args: GenericArgsRef<'tcx>,
+ ) -> GenericArgsRef<'tcx> {
let defs = tcx.generics_of(source_ancestor);
- tcx.mk_substs_from_iter(target_substs.iter().chain(self.iter().skip(defs.params.len())))
+ tcx.mk_args_from_iter(target_args.iter().chain(self.iter().skip(defs.params.len())))
}
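The body of `rebase_onto` is just the chain-and-skip on its final line: keep the target's base args and append the child-specific args that follow the source ancestor's parameters. A standalone sketch using plain string slices, matching the `[Self, S, T]` onto `[U]` example from the doc comment (names are illustrative only):

```rust
fn rebase_onto<'a>(
    child_args: &[&'a str],      // identity args of `f` in the trait: [Self, S, T]
    ancestor_param_count: usize, // the trait has two params: Self and S
    target_args: &[&'a str],     // the impl's args: [U]
) -> Vec<&'a str> {
    target_args
        .iter()
        .chain(child_args.iter().skip(ancestor_param_count))
        .copied()
        .collect()
}

fn main() {
    // As in the worked example above: [Self, S, T] rebased onto [U] gives [U, T].
    assert_eq!(rebase_onto(&["Self", "S", "T"], 2, &["U"]), vec!["U", "T"]);
}
```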
- pub fn truncate_to(&self, tcx: TyCtxt<'tcx>, generics: &ty::Generics) -> SubstsRef<'tcx> {
- tcx.mk_substs_from_iter(self.iter().take(generics.count()))
+ pub fn truncate_to(&self, tcx: TyCtxt<'tcx>, generics: &ty::Generics) -> GenericArgsRef<'tcx> {
+ tcx.mk_args_from_iter(self.iter().take(generics.count()))
}
pub fn host_effect_param(&'tcx self) -> Option<ty::Const<'tcx>> {
@@ -458,7 +452,7 @@ impl<'tcx> InternalSubsts<'tcx> {
}
}
-impl<'tcx> TypeFoldable<TyCtxt<'tcx>> for SubstsRef<'tcx> {
+impl<'tcx> TypeFoldable<TyCtxt<'tcx>> for GenericArgsRef<'tcx> {
fn try_fold_with<F: FallibleTypeFolder<TyCtxt<'tcx>>>(
self,
folder: &mut F,
@@ -467,16 +461,12 @@ impl<'tcx> TypeFoldable<TyCtxt<'tcx>> for SubstsRef<'tcx> {
// common length lists, to avoid the overhead of `SmallVec` creation.
// The match arms are in order of frequency. The 1, 2, and 0 cases are
// typically hit in 90--99.99% of cases. When folding doesn't change
- // the substs, it's faster to reuse the existing substs rather than
- // calling `mk_substs`.
+ // the args, it's faster to reuse the existing args rather than
+ // calling `mk_args`.
match self.len() {
1 => {
let param0 = self[0].try_fold_with(folder)?;
- if param0 == self[0] {
- Ok(self)
- } else {
- Ok(folder.interner().mk_substs(&[param0]))
- }
+ if param0 == self[0] { Ok(self) } else { Ok(folder.interner().mk_args(&[param0])) }
}
2 => {
let param0 = self[0].try_fold_with(folder)?;
@@ -484,11 +474,11 @@ impl<'tcx> TypeFoldable<TyCtxt<'tcx>> for SubstsRef<'tcx> {
if param0 == self[0] && param1 == self[1] {
Ok(self)
} else {
- Ok(folder.interner().mk_substs(&[param0, param1]))
+ Ok(folder.interner().mk_args(&[param0, param1]))
}
}
0 => Ok(self),
- _ => ty::util::fold_list(self, folder, |tcx, v| tcx.mk_substs(v)),
+ _ => ty::util::fold_list(self, folder, |tcx, v| tcx.mk_args(v)),
}
}
}
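The comment above notes that when folding changes nothing it is cheaper to hand back the existing (interned) list than to call `mk_args` again. A minimal standalone model of that reuse trick, with `Rc` standing in for an interned list (this is an illustration, not rustc's interner):

```rust
use std::rc::Rc;

fn fold_list(list: Rc<Vec<u32>>, f: impl Fn(u32) -> u32) -> Rc<Vec<u32>> {
    let folded: Vec<u32> = list.iter().map(|&x| f(x)).collect();
    // Only allocate (re-intern) when something actually changed.
    if folded == *list { list } else { Rc::new(folded) }
}

fn main() {
    let original = Rc::new(vec![1, 2, 3]);
    let same = fold_list(Rc::clone(&original), |x| x); // identity fold
    assert!(Rc::ptr_eq(&original, &same)); // the old allocation is reused

    let doubled = fold_list(Rc::clone(&original), |x| x * 2);
    assert_eq!(*doubled, vec![2, 4, 6]);
}
```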
@@ -498,7 +488,7 @@ impl<'tcx> TypeFoldable<TyCtxt<'tcx>> for &'tcx ty::List<Ty<'tcx>> {
self,
folder: &mut F,
) -> Result<Self, F::Error> {
- // This code is fairly hot, though not as hot as `SubstsRef`.
+ // This code is fairly hot, though not as hot as `GenericArgsRef`.
//
// When compiling stage 2, I get the following results:
//
@@ -536,18 +526,18 @@ impl<'tcx, T: TypeVisitable<TyCtxt<'tcx>>> TypeVisitable<TyCtxt<'tcx>> for &'tcx
}
/// Similar to [`super::Binder`] except that it tracks early bound generics, i.e. `struct Foo<T>(T)`
-/// needs `T` substituted immediately. This type primarily exists to avoid forgetting to call
-/// `subst`.
+/// needs `T` instantiated immediately. This type primarily exists to avoid forgetting to call
+/// `instantiate`.
///
-/// If you don't have anything to `subst`, you may be looking for
-/// [`subst_identity`](EarlyBinder::subst_identity) or [`skip_binder`](EarlyBinder::skip_binder).
+/// If you don't have anything to `instantiate`, you may be looking for
+/// [`instantiate_identity`](EarlyBinder::instantiate_identity) or [`skip_binder`](EarlyBinder::skip_binder).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[derive(Encodable, Decodable, HashStable)]
pub struct EarlyBinder<T> {
value: T,
}
-/// For early binders, you should first call `subst` before using any visitors.
+/// For early binders, you should first call `instantiate` before using any visitors.
impl<'tcx, T> !TypeFoldable<TyCtxt<'tcx>> for ty::EarlyBinder<T> {}
impl<'tcx, T> !TypeVisitable<TyCtxt<'tcx>> for ty::EarlyBinder<T> {}
@@ -591,7 +581,7 @@ impl<T> EarlyBinder<T> {
/// This can be used to extract data that does not depend on generic parameters
/// (e.g., getting the `DefId` of the inner value or getting the number of
/// arguments of an `FnSig`). Otherwise, consider using
- /// [`subst_identity`](EarlyBinder::subst_identity).
+ /// [`instantiate_identity`](EarlyBinder::instantiate_identity).
///
/// To skip the binder on `x: &EarlyBinder<T>` to obtain `&T`, leverage
/// [`EarlyBinder::as_ref`](EarlyBinder::as_ref): `x.as_ref().skip_binder()`.
@@ -620,35 +610,35 @@ impl<'tcx, 's, I: IntoIterator> EarlyBinder<I>
where
I::Item: TypeFoldable<TyCtxt<'tcx>>,
{
- pub fn subst_iter(
+ pub fn iter_instantiated(
self,
tcx: TyCtxt<'tcx>,
- substs: &'s [GenericArg<'tcx>],
- ) -> SubstIter<'s, 'tcx, I> {
- SubstIter { it: self.value.into_iter(), tcx, substs }
+ args: &'s [GenericArg<'tcx>],
+ ) -> IterInstantiated<'s, 'tcx, I> {
+ IterInstantiated { it: self.value.into_iter(), tcx, args }
}
- /// Similar to [`subst_identity`](EarlyBinder::subst_identity),
+ /// Similar to [`instantiate_identity`](EarlyBinder::instantiate_identity),
/// but on an iterator of `TypeFoldable` values.
- pub fn subst_identity_iter(self) -> I::IntoIter {
+ pub fn instantiate_identity_iter(self) -> I::IntoIter {
self.value.into_iter()
}
}
-pub struct SubstIter<'s, 'tcx, I: IntoIterator> {
+pub struct IterInstantiated<'s, 'tcx, I: IntoIterator> {
it: I::IntoIter,
tcx: TyCtxt<'tcx>,
- substs: &'s [GenericArg<'tcx>],
+ args: &'s [GenericArg<'tcx>],
}
-impl<'tcx, I: IntoIterator> Iterator for SubstIter<'_, 'tcx, I>
+impl<'tcx, I: IntoIterator> Iterator for IterInstantiated<'_, 'tcx, I>
where
I::Item: TypeFoldable<TyCtxt<'tcx>>,
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
- Some(EarlyBinder { value: self.it.next()? }.subst(self.tcx, self.substs))
+ Some(EarlyBinder { value: self.it.next()? }.instantiate(self.tcx, self.args))
}
fn size_hint(&self) -> (usize, Option<usize>) {
@@ -656,17 +646,17 @@ where
}
}
-impl<'tcx, I: IntoIterator> DoubleEndedIterator for SubstIter<'_, 'tcx, I>
+impl<'tcx, I: IntoIterator> DoubleEndedIterator for IterInstantiated<'_, 'tcx, I>
where
I::IntoIter: DoubleEndedIterator,
I::Item: TypeFoldable<TyCtxt<'tcx>>,
{
fn next_back(&mut self) -> Option<Self::Item> {
- Some(EarlyBinder { value: self.it.next_back()? }.subst(self.tcx, self.substs))
+ Some(EarlyBinder { value: self.it.next_back()? }.instantiate(self.tcx, self.args))
}
}
-impl<'tcx, I: IntoIterator> ExactSizeIterator for SubstIter<'_, 'tcx, I>
+impl<'tcx, I: IntoIterator> ExactSizeIterator for IterInstantiated<'_, 'tcx, I>
where
I::IntoIter: ExactSizeIterator,
I::Item: TypeFoldable<TyCtxt<'tcx>>,
@@ -678,28 +668,30 @@ where
I::Item: Deref,
<I::Item as Deref>::Target: Copy + TypeFoldable<TyCtxt<'tcx>>,
{
- pub fn subst_iter_copied(
+ pub fn iter_instantiated_copied(
self,
tcx: TyCtxt<'tcx>,
- substs: &'s [GenericArg<'tcx>],
- ) -> SubstIterCopied<'s, 'tcx, I> {
- SubstIterCopied { it: self.value.into_iter(), tcx, substs }
+ args: &'s [GenericArg<'tcx>],
+ ) -> IterInstantiatedCopied<'s, 'tcx, I> {
+ IterInstantiatedCopied { it: self.value.into_iter(), tcx, args }
}
- /// Similar to [`subst_identity`](EarlyBinder::subst_identity),
+ /// Similar to [`instantiate_identity`](EarlyBinder::instantiate_identity),
/// but on an iterator of values that deref to a `TypeFoldable`.
- pub fn subst_identity_iter_copied(self) -> impl Iterator<Item = <I::Item as Deref>::Target> {
+ pub fn instantiate_identity_iter_copied(
+ self,
+ ) -> impl Iterator<Item = <I::Item as Deref>::Target> {
self.value.into_iter().map(|v| *v)
}
}
-pub struct SubstIterCopied<'a, 'tcx, I: IntoIterator> {
+pub struct IterInstantiatedCopied<'a, 'tcx, I: IntoIterator> {
it: I::IntoIter,
tcx: TyCtxt<'tcx>,
- substs: &'a [GenericArg<'tcx>],
+ args: &'a [GenericArg<'tcx>],
}
-impl<'tcx, I: IntoIterator> Iterator for SubstIterCopied<'_, 'tcx, I>
+impl<'tcx, I: IntoIterator> Iterator for IterInstantiatedCopied<'_, 'tcx, I>
where
I::Item: Deref,
<I::Item as Deref>::Target: Copy + TypeFoldable<TyCtxt<'tcx>>,
@@ -707,7 +699,7 @@ where
type Item = <I::Item as Deref>::Target;
fn next(&mut self) -> Option<Self::Item> {
- self.it.next().map(|value| EarlyBinder { value: *value }.subst(self.tcx, self.substs))
+ self.it.next().map(|value| EarlyBinder { value: *value }.instantiate(self.tcx, self.args))
}
fn size_hint(&self) -> (usize, Option<usize>) {
@@ -715,18 +707,20 @@ where
}
}
-impl<'tcx, I: IntoIterator> DoubleEndedIterator for SubstIterCopied<'_, 'tcx, I>
+impl<'tcx, I: IntoIterator> DoubleEndedIterator for IterInstantiatedCopied<'_, 'tcx, I>
where
I::IntoIter: DoubleEndedIterator,
I::Item: Deref,
<I::Item as Deref>::Target: Copy + TypeFoldable<TyCtxt<'tcx>>,
{
fn next_back(&mut self) -> Option<Self::Item> {
- self.it.next_back().map(|value| EarlyBinder { value: *value }.subst(self.tcx, self.substs))
+ self.it
+ .next_back()
+ .map(|value| EarlyBinder { value: *value }.instantiate(self.tcx, self.args))
}
}
-impl<'tcx, I: IntoIterator> ExactSizeIterator for SubstIterCopied<'_, 'tcx, I>
+impl<'tcx, I: IntoIterator> ExactSizeIterator for IterInstantiatedCopied<'_, 'tcx, I>
where
I::IntoIter: ExactSizeIterator,
I::Item: Deref,
@@ -757,20 +751,20 @@ impl<T: Iterator> Iterator for EarlyBinderIter<T> {
}
impl<'tcx, T: TypeFoldable<TyCtxt<'tcx>>> ty::EarlyBinder<T> {
- pub fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> T {
- let mut folder = SubstFolder { tcx, substs, binders_passed: 0 };
+ pub fn instantiate(self, tcx: TyCtxt<'tcx>, args: &[GenericArg<'tcx>]) -> T {
+ let mut folder = ArgFolder { tcx, args, binders_passed: 0 };
self.value.fold_with(&mut folder)
}
- /// Makes the identity substitution `T0 => T0, ..., TN => TN`.
+ /// Makes the identity replacement `T0 => T0, ..., TN => TN`.
/// Conceptually, this converts universally bound variables into placeholders
/// when inside of a given item.
///
/// For example, consider `for<T> fn foo<T>(){ .. }`:
/// - Outside of `foo`, `T` is bound (represented by the presence of `EarlyBinder`).
/// - Inside of the body of `foo`, we treat `T` as a placeholder by calling
- /// `subst_identity` to discharge the `EarlyBinder`.
- pub fn subst_identity(self) -> T {
+ /// `instantiate_identity` to discharge the `EarlyBinder`.
+ pub fn instantiate_identity(self) -> T {
self.value
}
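As a rough mental model of the `instantiate`/`instantiate_identity` pair defined above, here is a self-contained toy in which positional placeholders such as `{0}` play the role of generic parameters (illustration only; the real `EarlyBinder` folds typed values with a `TyCtxt`):

```rust
struct EarlyBinder<T> {
    value: T,
}

impl EarlyBinder<String> {
    // Counterpart of `instantiate`: replace each placeholder with the
    // argument at the same index.
    fn instantiate(self, args: &[&str]) -> String {
        let mut out = self.value;
        for (i, arg) in args.iter().enumerate() {
            out = out.replace(&format!("{{{i}}}"), arg);
        }
        out
    }

    // Counterpart of `instantiate_identity`: inside the defining item the
    // parameters stand for themselves, so nothing is replaced.
    fn instantiate_identity(self) -> String {
        self.value
    }
}

fn main() {
    let sig = EarlyBinder { value: String::from("fn({0}) -> {1}") };
    assert_eq!(sig.instantiate(&["u32", "bool"]), "fn(u32) -> bool");

    let sig = EarlyBinder { value: String::from("fn({0}) -> {1}") };
    assert_eq!(sig.instantiate_identity(), "fn({0}) -> {1}");
}
```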
@@ -783,15 +777,15 @@ impl<'tcx, T: TypeFoldable<TyCtxt<'tcx>>> ty::EarlyBinder<T> {
///////////////////////////////////////////////////////////////////////////
// The actual substitution engine itself is a type folder.
-struct SubstFolder<'a, 'tcx> {
+struct ArgFolder<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
- substs: &'a [GenericArg<'tcx>],
+ args: &'a [GenericArg<'tcx>],
/// Number of region binders we have passed through while doing the substitution
binders_passed: u32,
}
-impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for SubstFolder<'a, 'tcx> {
+impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for ArgFolder<'a, 'tcx> {
#[inline]
fn interner(&self) -> TyCtxt<'tcx> {
self.tcx
@@ -810,12 +804,12 @@ impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for SubstFolder<'a, 'tcx> {
fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
#[cold]
#[inline(never)]
- fn region_param_out_of_range(data: ty::EarlyBoundRegion, substs: &[GenericArg<'_>]) -> ! {
+ fn region_param_out_of_range(data: ty::EarlyBoundRegion, args: &[GenericArg<'_>]) -> ! {
bug!(
- "Region parameter out of range when substituting in region {} (index={}, substs = {:?})",
+ "Region parameter out of range when substituting in region {} (index={}, args = {:?})",
data.name,
data.index,
- substs,
+ args,
)
}
@@ -837,11 +831,11 @@ impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for SubstFolder<'a, 'tcx> {
// the specialized routine `ty::replace_late_regions()`.
match *r {
ty::ReEarlyBound(data) => {
- let rk = self.substs.get(data.index as usize).map(|k| k.unpack());
+ let rk = self.args.get(data.index as usize).map(|k| k.unpack());
match rk {
Some(GenericArgKind::Lifetime(lt)) => self.shift_region_through_binders(lt),
Some(other) => region_param_invalid(data, other),
- None => region_param_out_of_range(data, self.substs),
+ None => region_param_out_of_range(data, self.args),
}
}
ty::ReLateBound(..)
@@ -874,10 +868,10 @@ impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for SubstFolder<'a, 'tcx> {
}
}
-impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
+impl<'a, 'tcx> ArgFolder<'a, 'tcx> {
fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
- // Look up the type in the substitutions. It really should be in there.
- let opt_ty = self.substs.get(p.index as usize).map(|k| k.unpack());
+ // Look up the type in the args. It really should be in there.
+ let opt_ty = self.args.get(p.index as usize).map(|k| k.unpack());
let ty = match opt_ty {
Some(GenericArgKind::Type(ty)) => ty,
Some(kind) => self.type_param_expected(p, source_ty, kind),
@@ -891,12 +885,12 @@ impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
#[inline(never)]
fn type_param_expected(&self, p: ty::ParamTy, ty: Ty<'tcx>, kind: GenericArgKind<'tcx>) -> ! {
bug!(
- "expected type for `{:?}` ({:?}/{}) but found {:?} when substituting, substs={:?}",
+ "expected type for `{:?}` ({:?}/{}) but found {:?} when substituting, args={:?}",
p,
ty,
p.index,
kind,
- self.substs,
+ self.args,
)
}
@@ -904,17 +898,17 @@ impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
#[inline(never)]
fn type_param_out_of_range(&self, p: ty::ParamTy, ty: Ty<'tcx>) -> ! {
bug!(
- "type parameter `{:?}` ({:?}/{}) out of range when substituting, substs={:?}",
+ "type parameter `{:?}` ({:?}/{}) out of range when substituting, args={:?}",
p,
ty,
p.index,
- self.substs,
+ self.args,
)
}
fn const_for_param(&self, p: ParamConst, source_ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
- // Look up the const in the substitutions. It really should be in there.
- let opt_ct = self.substs.get(p.index as usize).map(|k| k.unpack());
+ // Look up the const in the args. It really should be in there.
+ let opt_ct = self.args.get(p.index as usize).map(|k| k.unpack());
let ct = match opt_ct {
Some(GenericArgKind::Const(ct)) => ct,
Some(kind) => self.const_param_expected(p, source_ct, kind),
@@ -933,12 +927,12 @@ impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
kind: GenericArgKind<'tcx>,
) -> ! {
bug!(
- "expected const for `{:?}` ({:?}/{}) but found {:?} when substituting substs={:?}",
+ "expected const for `{:?}` ({:?}/{}) but found {:?} when substituting args={:?}",
p,
ct,
p.index,
kind,
- self.substs,
+ self.args,
)
}
@@ -946,11 +940,11 @@ impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
#[inline(never)]
fn const_param_out_of_range(&self, p: ty::ParamConst, ct: ty::Const<'tcx>) -> ! {
bug!(
- "const parameter `{:?}` ({:?}/{}) out of range when substituting substs={:?}",
+ "const parameter `{:?}` ({:?}/{}) out of range when substituting args={:?}",
p,
ct,
p.index,
- self.substs,
+ self.args,
)
}
@@ -1022,13 +1016,13 @@ impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
}
}
-/// Stores the user-given substs to reach some fully qualified path
+/// Stores the user-given args to reach some fully qualified path
/// (e.g., `<T>::Item` or `<T as Trait>::Item`).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
-pub struct UserSubsts<'tcx> {
- /// The substitutions for the item as given by the user.
- pub substs: SubstsRef<'tcx>,
+pub struct UserArgs<'tcx> {
+ /// The args for the item as given by the user.
+ pub args: GenericArgsRef<'tcx>,
/// The self type, in the case of a `<T>::Item` path (when applied
/// to an inherent impl). See `UserSelfTy` below.
@@ -1048,7 +1042,7 @@ pub struct UserSubsts<'tcx> {
/// when you then have a path like `<Foo<&'static u32>>::method`,
/// this struct would carry the `DefId` of the impl along with the
/// self type `Foo<u32>`. Then we can instantiate the parameters of
-/// the impl (with the substs from `UserSubsts`) and apply those to
+/// the impl (with the args from `UserArgs`) and apply those to
/// the self type, giving `Foo<?A>`. Finally, we unify that with
/// the self type here, inferring `?A` to be `&'static u32`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs
index 6c7125c4c..70a35f137 100644
--- a/compiler/rustc_middle/src/ty/generics.rs
+++ b/compiler/rustc_middle/src/ty/generics.rs
@@ -1,5 +1,5 @@
use crate::ty;
-use crate::ty::{EarlyBinder, SubstsRef};
+use crate::ty::{EarlyBinder, GenericArgsRef};
use rustc_ast as ast;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::DefId;
@@ -97,14 +97,14 @@ impl GenericParamDef {
pub fn to_error<'tcx>(
&self,
tcx: TyCtxt<'tcx>,
- preceding_substs: &[ty::GenericArg<'tcx>],
+ preceding_args: &[ty::GenericArg<'tcx>],
) -> ty::GenericArg<'tcx> {
match &self.kind {
ty::GenericParamDefKind::Lifetime => ty::Region::new_error_misc(tcx).into(),
ty::GenericParamDefKind::Type { .. } => Ty::new_misc_error(tcx).into(),
ty::GenericParamDefKind::Const { .. } => ty::Const::new_misc_error(
tcx,
- tcx.type_of(self.def_id).subst(tcx, preceding_substs),
+ tcx.type_of(self.def_id).instantiate(tcx, preceding_args),
)
.into(),
}
@@ -136,7 +136,7 @@ pub struct Generics {
pub has_self: bool,
pub has_late_bound_regions: Option<Span>,
- // The index of the host effect when substituted. (i.e. might be index to parent substs)
+ // The index of the host effect when substituted (i.e. it may be an index into the parent's args).
pub host_effect_index: Option<usize>,
}
@@ -278,14 +278,14 @@ impl<'tcx> Generics {
})
}
- /// Returns the substs corresponding to the generic parameters
+ /// Returns the args corresponding to the generic parameters
/// of this item, excluding `Self`.
///
/// **This should only be used for diagnostics purposes.**
- pub fn own_substs_no_defaults(
+ pub fn own_args_no_defaults(
&'tcx self,
tcx: TyCtxt<'tcx>,
- substs: &'tcx [ty::GenericArg<'tcx>],
+ args: &'tcx [ty::GenericArg<'tcx>],
) -> &'tcx [ty::GenericArg<'tcx>] {
let mut own_params = self.parent_count..self.count();
if self.has_self && self.parent.is_none() {
@@ -304,22 +304,22 @@ impl<'tcx> Generics {
.rev()
.take_while(|param| {
param.default_value(tcx).is_some_and(|default| {
- default.subst(tcx, substs) == substs[param.index as usize]
+ default.instantiate(tcx, args) == args[param.index as usize]
})
})
.count();
- &substs[own_params]
+ &args[own_params]
}
- /// Returns the substs corresponding to the generic parameters of this item, excluding `Self`.
+ /// Returns the args corresponding to the generic parameters of this item, excluding `Self`.
///
/// **This should only be used for diagnostics purposes.**
- pub fn own_substs(
+ pub fn own_args(
&'tcx self,
- substs: &'tcx [ty::GenericArg<'tcx>],
+ args: &'tcx [ty::GenericArg<'tcx>],
) -> &'tcx [ty::GenericArg<'tcx>] {
- let own = &substs[self.parent_count..][..self.params.len()];
+ let own = &args[self.parent_count..][..self.params.len()];
if self.has_self && self.parent.is_none() { &own[1..] } else { &own }
}
}
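Both helpers above are slicing operations: take the item's own portion of the argument list, drop `Self` where appropriate, and (for the `_no_defaults` variant) trim trailing arguments that merely repeat their parameter's default. A simplified standalone sketch over plain strings (function and field names invented; in rustc, `Self` is only dropped when there is no parent):

```rust
fn own_args<'a>(
    args: &'a [&'a str],
    parent_count: usize, // like Generics::parent_count
    own_count: usize,    // like Generics::params.len()
    has_self: bool,
) -> &'a [&'a str] {
    let own = &args[parent_count..][..own_count];
    if has_self { &own[1..] } else { own }
}

// Trim trailing arguments that are identical to their parameter's default.
fn strip_defaults<'a>(own: &'a [&'a str], defaults: &[Option<&str>]) -> &'a [&'a str] {
    let trailing = own
        .iter()
        .zip(defaults)
        .rev()
        .take_while(|(arg, default)| Some(**arg) == **default)
        .count();
    &own[..own.len() - trailing]
}

fn main() {
    // A trait `Convert<K, V>`: its generics are [Self, K, V].
    assert_eq!(own_args(&["Self", "K", "V"], 0, 3, true), &["K", "V"]);

    // `HashMap<K, V, S = RandomState>` written out as `HashMap<i32, u8, RandomState>`:
    let defaults = [None, None, Some("RandomState")];
    assert_eq!(strip_defaults(&["i32", "u8", "RandomState"], &defaults), &["i32", "u8"]);
}
```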
@@ -335,19 +335,19 @@ impl<'tcx> GenericPredicates<'tcx> {
pub fn instantiate(
&self,
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> InstantiatedPredicates<'tcx> {
let mut instantiated = InstantiatedPredicates::empty();
- self.instantiate_into(tcx, &mut instantiated, substs);
+ self.instantiate_into(tcx, &mut instantiated, args);
instantiated
}
pub fn instantiate_own(
&self,
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> impl Iterator<Item = (Clause<'tcx>, Span)> + DoubleEndedIterator + ExactSizeIterator {
- EarlyBinder::bind(self.predicates).subst_iter_copied(tcx, substs)
+ EarlyBinder::bind(self.predicates).iter_instantiated_copied(tcx, args)
}
#[instrument(level = "debug", skip(self, tcx))]
@@ -355,14 +355,14 @@ impl<'tcx> GenericPredicates<'tcx> {
&self,
tcx: TyCtxt<'tcx>,
instantiated: &mut InstantiatedPredicates<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) {
if let Some(def_id) = self.parent {
- tcx.predicates_of(def_id).instantiate_into(tcx, instantiated, substs);
+ tcx.predicates_of(def_id).instantiate_into(tcx, instantiated, args);
}
- instantiated
- .predicates
- .extend(self.predicates.iter().map(|(p, _)| EarlyBinder::bind(*p).subst(tcx, substs)));
+ instantiated.predicates.extend(
+ self.predicates.iter().map(|(p, _)| EarlyBinder::bind(*p).instantiate(tcx, args)),
+ );
instantiated.spans.extend(self.predicates.iter().map(|(_, sp)| *sp));
}
diff --git a/compiler/rustc_middle/src/ty/impls_ty.rs b/compiler/rustc_middle/src/ty/impls_ty.rs
index 02baa395c..b03874a90 100644
--- a/compiler/rustc_middle/src/ty/impls_ty.rs
+++ b/compiler/rustc_middle/src/ty/impls_ty.rs
@@ -67,7 +67,7 @@ impl<'a> ToStableHashKey<StableHashingContext<'a>> for SimplifiedType {
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ty::subst::GenericArg<'tcx> {
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ty::GenericArg<'tcx> {
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
self.unpack().hash_stable(hcx, hasher);
}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs
index 295cb1464..f278cace9 100644
--- a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs
+++ b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs
@@ -19,7 +19,7 @@ pub enum InhabitedPredicate<'tcx> {
/// type has restricted visibility.
NotInModule(DefId),
/// Inhabited if some generic type is inhabited.
- /// These are replaced by calling [`Self::subst`].
+ /// These are replaced by calling [`Self::instantiate`].
GenericType(Ty<'tcx>),
/// A AND B
And(&'tcx [InhabitedPredicate<'tcx>; 2]),
@@ -162,14 +162,14 @@ impl<'tcx> InhabitedPredicate<'tcx> {
}
/// Replaces generic types with their corresponding predicates
- pub fn subst(self, tcx: TyCtxt<'tcx>, substs: ty::SubstsRef<'tcx>) -> Self {
- self.subst_opt(tcx, substs).unwrap_or(self)
+ pub fn instantiate(self, tcx: TyCtxt<'tcx>, args: ty::GenericArgsRef<'tcx>) -> Self {
+ self.instantiate_opt(tcx, args).unwrap_or(self)
}
- fn subst_opt(self, tcx: TyCtxt<'tcx>, substs: ty::SubstsRef<'tcx>) -> Option<Self> {
+ fn instantiate_opt(self, tcx: TyCtxt<'tcx>, args: ty::GenericArgsRef<'tcx>) -> Option<Self> {
match self {
Self::ConstIsZero(c) => {
- let c = ty::EarlyBinder::bind(c).subst(tcx, substs);
+ let c = ty::EarlyBinder::bind(c).instantiate(tcx, args);
let pred = match c.try_to_target_usize(tcx) {
Some(0) => Self::True,
Some(1..) => Self::False,
@@ -178,17 +178,17 @@ impl<'tcx> InhabitedPredicate<'tcx> {
Some(pred)
}
Self::GenericType(t) => {
- Some(ty::EarlyBinder::bind(t).subst(tcx, substs).inhabited_predicate(tcx))
+ Some(ty::EarlyBinder::bind(t).instantiate(tcx, args).inhabited_predicate(tcx))
}
- Self::And(&[a, b]) => match a.subst_opt(tcx, substs) {
- None => b.subst_opt(tcx, substs).map(|b| a.and(tcx, b)),
+ Self::And(&[a, b]) => match a.instantiate_opt(tcx, args) {
+ None => b.instantiate_opt(tcx, args).map(|b| a.and(tcx, b)),
Some(InhabitedPredicate::False) => Some(InhabitedPredicate::False),
- Some(a) => Some(a.and(tcx, b.subst_opt(tcx, substs).unwrap_or(b))),
+ Some(a) => Some(a.and(tcx, b.instantiate_opt(tcx, args).unwrap_or(b))),
},
- Self::Or(&[a, b]) => match a.subst_opt(tcx, substs) {
- None => b.subst_opt(tcx, substs).map(|b| a.or(tcx, b)),
+ Self::Or(&[a, b]) => match a.instantiate_opt(tcx, args) {
+ None => b.instantiate_opt(tcx, args).map(|b| a.or(tcx, b)),
Some(InhabitedPredicate::True) => Some(InhabitedPredicate::True),
- Some(a) => Some(a.or(tcx, b.subst_opt(tcx, substs).unwrap_or(b))),
+ Some(a) => Some(a.or(tcx, b.instantiate_opt(tcx, args).unwrap_or(b))),
},
_ => None,
}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
index b92d84152..4dac6891b 100644
--- a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
+++ b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
@@ -58,7 +58,7 @@ pub(crate) fn provide(providers: &mut Providers) {
}
/// Returns an `InhabitedPredicate` that is generic over type parameters and
-/// requires calling [`InhabitedPredicate::subst`]
+/// requires calling [`InhabitedPredicate::instantiate`]
fn inhabited_predicate_adt(tcx: TyCtxt<'_>, def_id: DefId) -> InhabitedPredicate<'_> {
if let Some(def_id) = def_id.as_local() {
if matches!(tcx.representability(def_id), ty::Representability::Infinite) {
@@ -87,7 +87,7 @@ impl<'tcx> VariantDef {
InhabitedPredicate::all(
tcx,
self.fields.iter().map(|field| {
- let pred = tcx.type_of(field.did).subst_identity().inhabited_predicate(tcx);
+ let pred = tcx.type_of(field.did).instantiate_identity().inhabited_predicate(tcx);
if adt.is_enum() {
return pred;
}
@@ -114,8 +114,8 @@ impl<'tcx> Ty<'tcx> {
Never => InhabitedPredicate::False,
Param(_) | Alias(ty::Projection, _) => InhabitedPredicate::GenericType(self),
// FIXME(inherent_associated_types): Most likely we can just map to `GenericType` like above.
- // However it's unclear if the substs passed to `InhabitedPredicate::subst` are of the correct
- // format, i.e. don't contain parent substs. If you hit this case, please verify this beforehand.
+ // However it's unclear if the args passed to `InhabitedPredicate::instantiate` are of the correct
+ // format, i.e. don't contain parent args. If you hit this case, please verify this beforehand.
Alias(ty::Inherent, _) => {
bug!("unimplemented: inhabitedness checking for inherent projections")
}
@@ -189,7 +189,7 @@ impl<'tcx> Ty<'tcx> {
/// N.B. this query should only be called through `Ty::inhabited_predicate`
fn inhabited_predicate_type<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> InhabitedPredicate<'tcx> {
match *ty.kind() {
- Adt(adt, substs) => tcx.inhabited_predicate_adt(adt.did()).subst(tcx, substs),
+ Adt(adt, args) => tcx.inhabited_predicate_adt(adt.did()).instantiate(tcx, args),
Tuple(tys) => {
InhabitedPredicate::all(tcx, tys.iter().map(|ty| ty.inhabited_predicate(tcx)))
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
index ae57e954f..8913bf76d 100644
--- a/compiler/rustc_middle/src/ty/instance.rs
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -1,7 +1,7 @@
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::ty::print::{FmtPrinter, Printer};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable};
-use crate::ty::{EarlyBinder, InternalSubsts, SubstsRef, TypeVisitableExt};
+use crate::ty::{EarlyBinder, GenericArgs, GenericArgsRef, TypeVisitableExt};
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::Namespace;
use rustc_hir::def_id::{CrateNum, DefId};
@@ -16,13 +16,13 @@ use std::fmt;
/// A monomorphized `InstanceDef`.
///
/// Monomorphization happens on-the-fly and no monomorphized MIR is ever created. Instead, this type
-/// simply couples a potentially generic `InstanceDef` with some substs, and codegen and const eval
+/// simply couples a potentially generic `InstanceDef` with some args, and codegen and const eval
/// will do all required substitution as they run.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
#[derive(HashStable, Lift, TypeFoldable, TypeVisitable)]
pub struct Instance<'tcx> {
pub def: InstanceDef<'tcx>,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
}
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
@@ -115,7 +115,7 @@ impl<'tcx> Instance<'tcx> {
/// lifetimes erased, allowing a `ParamEnv` to be specified for use during normalization.
pub fn ty(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Ty<'tcx> {
let ty = tcx.type_of(self.def.def_id());
- tcx.subst_and_normalize_erasing_regions(self.substs, param_env, ty)
+ tcx.subst_and_normalize_erasing_regions(self.args, param_env, ty)
}
/// Finds a crate that contains a monomorphization of this instance that
@@ -139,13 +139,13 @@ impl<'tcx> Instance<'tcx> {
}
// If this is a non-generic instance, it cannot be a shared monomorphization.
- self.substs.non_erasable_generics().next()?;
+ self.args.non_erasable_generics().next()?;
match self.def {
InstanceDef::Item(def) => tcx
.upstream_monomorphizations_for(def)
- .and_then(|monos| monos.get(&self.substs).cloned()),
- InstanceDef::DropGlue(_, Some(_)) => tcx.upstream_drop_glue_for(self.substs),
+ .and_then(|monos| monos.get(&self.args).cloned()),
+ InstanceDef::DropGlue(_, Some(_)) => tcx.upstream_drop_glue_for(self.args),
_ => None,
}
}
@@ -265,8 +265,8 @@ impl<'tcx> InstanceDef<'tcx> {
}
/// Returns `true` when the MIR body associated with this instance should be monomorphized
- /// by its users (e.g. codegen or miri) by substituting the `substs` from `Instance` (see
- /// `Instance::substs_for_mir_body`).
+ /// by its users (e.g. codegen or miri) by substituting the `args` from `Instance` (see
+ /// `Instance::args_for_mir_body`).
///
/// Otherwise, returns `false` only for some kinds of shims where the construction of the MIR
/// body should perform necessary substitutions.
@@ -294,10 +294,10 @@ fn fmt_instance(
type_length: rustc_session::Limit,
) -> fmt::Result {
ty::tls::with(|tcx| {
- let substs = tcx.lift(instance.substs).expect("could not lift for printing");
+ let args = tcx.lift(instance.args).expect("could not lift for printing");
let s = FmtPrinter::new_with_limit(tcx, Namespace::ValueNS, type_length)
- .print_def_path(instance.def_id(), substs)?
+ .print_def_path(instance.def_id(), args)?
.into_buffer();
f.write_str(&s)
})?;
@@ -308,13 +308,13 @@ fn fmt_instance(
InstanceDef::ReifyShim(_) => write!(f, " - shim(reify)"),
InstanceDef::ThreadLocalShim(_) => write!(f, " - shim(tls)"),
InstanceDef::Intrinsic(_) => write!(f, " - intrinsic"),
- InstanceDef::Virtual(_, num) => write!(f, " - virtual#{}", num),
- InstanceDef::FnPtrShim(_, ty) => write!(f, " - shim({})", ty),
+ InstanceDef::Virtual(_, num) => write!(f, " - virtual#{num}"),
+ InstanceDef::FnPtrShim(_, ty) => write!(f, " - shim({ty})"),
InstanceDef::ClosureOnceShim { .. } => write!(f, " - shim"),
InstanceDef::DropGlue(_, None) => write!(f, " - shim(None)"),
- InstanceDef::DropGlue(_, Some(ty)) => write!(f, " - shim(Some({}))", ty),
- InstanceDef::CloneShim(_, ty) => write!(f, " - shim({})", ty),
- InstanceDef::FnPtrAddrShim(_, ty) => write!(f, " - shim({})", ty),
+ InstanceDef::DropGlue(_, Some(ty)) => write!(f, " - shim(Some({ty}))"),
+ InstanceDef::CloneShim(_, ty) => write!(f, " - shim({ty})"),
+ InstanceDef::FnPtrAddrShim(_, ty) => write!(f, " - shim({ty})"),
}
}
@@ -333,18 +333,16 @@ impl<'tcx> fmt::Display for Instance<'tcx> {
}
impl<'tcx> Instance<'tcx> {
- pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> Instance<'tcx> {
+ pub fn new(def_id: DefId, args: GenericArgsRef<'tcx>) -> Instance<'tcx> {
assert!(
- !substs.has_escaping_bound_vars(),
- "substs of instance {:?} not normalized for codegen: {:?}",
- def_id,
- substs
+ !args.has_escaping_bound_vars(),
+ "args of instance {def_id:?} not normalized for codegen: {args:?}"
);
- Instance { def: InstanceDef::Item(def_id), substs }
+ Instance { def: InstanceDef::Item(def_id), args }
}
pub fn mono(tcx: TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> {
- let substs = InternalSubsts::for_item(tcx, def_id, |param, _| match param.kind {
+ let args = GenericArgs::for_item(tcx, def_id, |param, _| match param.kind {
ty::GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
ty::GenericParamDefKind::Type { .. } => {
bug!("Instance::mono: {:?} has type parameters", def_id)
@@ -354,7 +352,7 @@ impl<'tcx> Instance<'tcx> {
}
});
- Instance::new(def_id, substs)
+ Instance::new(def_id, args)
}
#[inline]
@@ -362,7 +360,7 @@ impl<'tcx> Instance<'tcx> {
self.def.def_id()
}
- /// Resolves a `(def_id, substs)` pair to an (optional) instance -- most commonly,
+ /// Resolves a `(def_id, args)` pair to an (optional) instance -- most commonly,
/// this is used to find the precise code that will run for a trait method invocation,
/// if known.
///
@@ -390,29 +388,29 @@ impl<'tcx> Instance<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
// All regions in the result of this query are erased, so it's
// fine to erase all of the input regions.
- // HACK(eddyb) erase regions in `substs` first, so that `param_env.and(...)`
+ // HACK(eddyb) erase regions in `args` first, so that `param_env.and(...)`
// below is more likely to ignore the bounds in scope (e.g. if the only
- // generic parameters mentioned by `substs` were lifetime ones).
- let substs = tcx.erase_regions(substs);
- tcx.resolve_instance(tcx.erase_regions(param_env.and((def_id, substs))))
+ // generic parameters mentioned by `args` were lifetime ones).
+ let args = tcx.erase_regions(args);
+ tcx.resolve_instance(tcx.erase_regions(param_env.and((def_id, args))))
}
pub fn expect_resolve(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Instance<'tcx> {
- match ty::Instance::resolve(tcx, param_env, def_id, substs) {
+ match ty::Instance::resolve(tcx, param_env, def_id, args) {
Ok(Some(instance)) => instance,
instance => bug!(
"failed to resolve instance for {}: {instance:#?}",
- tcx.def_path_str_with_substs(def_id, substs)
+ tcx.def_path_str_with_args(def_id, args)
),
}
}
@@ -421,12 +419,12 @@ impl<'tcx> Instance<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Option<Instance<'tcx>> {
- debug!("resolve(def_id={:?}, substs={:?})", def_id, substs);
+ debug!("resolve(def_id={:?}, args={:?})", def_id, args);
// Use either `resolve_closure` or `resolve_for_vtable`
- assert!(!tcx.is_closure(def_id), "Called `resolve_for_fn_ptr` on closure: {:?}", def_id);
- Instance::resolve(tcx, param_env, def_id, substs).ok().flatten().map(|mut resolved| {
+ assert!(!tcx.is_closure(def_id), "Called `resolve_for_fn_ptr` on closure: {def_id:?}");
+ Instance::resolve(tcx, param_env, def_id, args).ok().flatten().map(|mut resolved| {
match resolved.def {
InstanceDef::Item(def) if resolved.def.requires_caller_location(tcx) => {
debug!(" => fn pointer created for function with #[track_caller]");
@@ -447,18 +445,18 @@ impl<'tcx> Instance<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Option<Instance<'tcx>> {
- debug!("resolve_for_vtable(def_id={:?}, substs={:?})", def_id, substs);
- let fn_sig = tcx.fn_sig(def_id).subst_identity();
+ debug!("resolve_for_vtable(def_id={:?}, args={:?})", def_id, args);
+ let fn_sig = tcx.fn_sig(def_id).instantiate_identity();
let is_vtable_shim = !fn_sig.inputs().skip_binder().is_empty()
&& fn_sig.input(0).skip_binder().is_param(0)
&& tcx.generics_of(def_id).has_self;
if is_vtable_shim {
debug!(" => associated item with unsizeable self: Self");
- Some(Instance { def: InstanceDef::VTableShim(def_id), substs })
+ Some(Instance { def: InstanceDef::VTableShim(def_id), args })
} else {
- Instance::resolve(tcx, param_env, def_id, substs).ok().flatten().map(|mut resolved| {
+ Instance::resolve(tcx, param_env, def_id, args).ok().flatten().map(|mut resolved| {
match resolved.def {
InstanceDef::Item(def) => {
// We need to generate a shim when we cannot guarantee that
@@ -489,12 +487,12 @@ impl<'tcx> Instance<'tcx> {
{
if tcx.is_closure(def) {
debug!(" => vtable fn pointer created for closure with #[track_caller]: {:?} for method {:?} {:?}",
- def, def_id, substs);
+ def, def_id, args);
// Create a shim for the `FnOnce/FnMut/Fn` method we are calling
// - unlike functions, invoking a closure always goes through a
// trait.
- resolved = Instance { def: InstanceDef::ReifyShim(def_id), substs };
+ resolved = Instance { def: InstanceDef::ReifyShim(def_id), args };
} else {
debug!(
" => vtable fn pointer created for function with #[track_caller]: {:?}", def
@@ -518,28 +516,28 @@ impl<'tcx> Instance<'tcx> {
pub fn resolve_closure(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- substs: ty::SubstsRef<'tcx>,
+ args: ty::GenericArgsRef<'tcx>,
requested_kind: ty::ClosureKind,
) -> Option<Instance<'tcx>> {
- let actual_kind = substs.as_closure().kind();
+ let actual_kind = args.as_closure().kind();
match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
- Ok(true) => Instance::fn_once_adapter_instance(tcx, def_id, substs),
- _ => Some(Instance::new(def_id, substs)),
+ Ok(true) => Instance::fn_once_adapter_instance(tcx, def_id, args),
+ _ => Some(Instance::new(def_id, args)),
}
}
pub fn resolve_drop_in_place(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ty::Instance<'tcx> {
let def_id = tcx.require_lang_item(LangItem::DropInPlace, None);
- let substs = tcx.mk_substs(&[ty.into()]);
- Instance::expect_resolve(tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ let args = tcx.mk_args(&[ty.into()]);
+ Instance::expect_resolve(tcx, ty::ParamEnv::reveal_all(), def_id, args)
}
#[instrument(level = "debug", skip(tcx), ret)]
pub fn fn_once_adapter_instance(
tcx: TyCtxt<'tcx>,
closure_did: DefId,
- substs: ty::SubstsRef<'tcx>,
+ args: ty::GenericArgsRef<'tcx>,
) -> Option<Instance<'tcx>> {
let fn_once = tcx.require_lang_item(LangItem::FnOnce, None);
let call_once = tcx
@@ -552,30 +550,30 @@ impl<'tcx> Instance<'tcx> {
tcx.codegen_fn_attrs(closure_did).flags.contains(CodegenFnAttrFlags::TRACK_CALLER);
let def = ty::InstanceDef::ClosureOnceShim { call_once, track_caller };
- let self_ty = Ty::new_closure(tcx, closure_did, substs);
+ let self_ty = Ty::new_closure(tcx, closure_did, args);
- let sig = substs.as_closure().sig();
+ let sig = args.as_closure().sig();
let sig =
tcx.try_normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig).ok()?;
assert_eq!(sig.inputs().len(), 1);
- let substs = tcx.mk_substs_trait(self_ty, [sig.inputs()[0].into()]);
+ let args = tcx.mk_args_trait(self_ty, [sig.inputs()[0].into()]);
debug!(?self_ty, ?sig);
- Some(Instance { def, substs })
+ Some(Instance { def, args })
}
/// Depending on the kind of `InstanceDef`, the MIR body associated with an
/// instance is expressed either in terms of the generic parameters of `self.def_id()`, or
/// in terms of the types found in the substitution array.
/// In the former case, we want to substitute those generic types and replace them with the
- /// values from the substs when monomorphizing the function body. But in the latter case, we
+ /// values from the args when monomorphizing the function body. But in the latter case, we
/// don't want to do that substitution, since it has already been done effectively.
///
- /// This function returns `Some(substs)` in the former case and `None` otherwise -- i.e., if
+ /// This function returns `Some(args)` in the former case and `None` otherwise -- i.e., if
/// this function returns `None`, then the MIR body does not require substitution during
/// codegen.
- fn substs_for_mir_body(&self) -> Option<SubstsRef<'tcx>> {
- self.def.has_polymorphic_mir_body().then_some(self.substs)
+ fn args_for_mir_body(&self) -> Option<GenericArgsRef<'tcx>> {
+ self.def.has_polymorphic_mir_body().then_some(self.args)
}
pub fn subst_mir<T>(&self, tcx: TyCtxt<'tcx>, v: EarlyBinder<&T>) -> T
@@ -583,10 +581,10 @@ impl<'tcx> Instance<'tcx> {
T: TypeFoldable<TyCtxt<'tcx>> + Copy,
{
let v = v.map_bound(|v| *v);
- if let Some(substs) = self.substs_for_mir_body() {
- v.subst(tcx, substs)
+ if let Some(args) = self.args_for_mir_body() {
+ v.instantiate(tcx, args)
} else {
- v.subst_identity()
+ v.instantiate_identity()
}
}
@@ -600,8 +598,8 @@ impl<'tcx> Instance<'tcx> {
where
T: TypeFoldable<TyCtxt<'tcx>> + Clone,
{
- if let Some(substs) = self.substs_for_mir_body() {
- tcx.subst_and_normalize_erasing_regions(substs, param_env, v)
+ if let Some(args) = self.args_for_mir_body() {
+ tcx.subst_and_normalize_erasing_regions(args, param_env, v)
} else {
tcx.normalize_erasing_regions(param_env, v.skip_binder())
}
@@ -617,14 +615,14 @@ impl<'tcx> Instance<'tcx> {
where
T: TypeFoldable<TyCtxt<'tcx>> + Clone,
{
- if let Some(substs) = self.substs_for_mir_body() {
- tcx.try_subst_and_normalize_erasing_regions(substs, param_env, v)
+ if let Some(args) = self.args_for_mir_body() {
+ tcx.try_subst_and_normalize_erasing_regions(args, param_env, v)
} else {
tcx.try_normalize_erasing_regions(param_env, v.skip_binder())
}
}
- /// Returns a new `Instance` where generic parameters in `instance.substs` are replaced by
+ /// Returns a new `Instance` where generic parameters in `instance.args` are replaced by
/// identity parameters if they are determined to be unused in `instance.def`.
pub fn polymorphize(self, tcx: TyCtxt<'tcx>) -> Self {
debug!("polymorphize: running polymorphization analysis");
@@ -632,18 +630,18 @@ impl<'tcx> Instance<'tcx> {
return self;
}
- let polymorphized_substs = polymorphize(tcx, self.def, self.substs);
- debug!("polymorphize: self={:?} polymorphized_substs={:?}", self, polymorphized_substs);
- Self { def: self.def, substs: polymorphized_substs }
+ let polymorphized_args = polymorphize(tcx, self.def, self.args);
+ debug!("polymorphize: self={:?} polymorphized_args={:?}", self, polymorphized_args);
+ Self { def: self.def, args: polymorphized_args }
}
}
fn polymorphize<'tcx>(
tcx: TyCtxt<'tcx>,
instance: ty::InstanceDef<'tcx>,
- substs: SubstsRef<'tcx>,
-) -> SubstsRef<'tcx> {
- debug!("polymorphize({:?}, {:?})", instance, substs);
+ args: GenericArgsRef<'tcx>,
+) -> GenericArgsRef<'tcx> {
+ debug!("polymorphize({:?}, {:?})", instance, args);
let unused = tcx.unused_generic_params(instance);
debug!("polymorphize: unused={:?}", unused);
@@ -653,9 +651,9 @@ fn polymorphize<'tcx>(
// multiple mono items (and eventually symbol clashes).
let def_id = instance.def_id();
let upvars_ty = if tcx.is_closure(def_id) {
- Some(substs.as_closure().tupled_upvars_ty())
+ Some(args.as_closure().tupled_upvars_ty())
} else if tcx.type_of(def_id).skip_binder().is_generator() {
- Some(substs.as_generator().tupled_upvars_ty())
+ Some(args.as_generator().tupled_upvars_ty())
} else {
None
};
@@ -674,22 +672,22 @@ fn polymorphize<'tcx>(
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
debug!("fold_ty: ty={:?}", ty);
match *ty.kind() {
- ty::Closure(def_id, substs) => {
- let polymorphized_substs =
- polymorphize(self.tcx, ty::InstanceDef::Item(def_id), substs);
- if substs == polymorphized_substs {
+ ty::Closure(def_id, args) => {
+ let polymorphized_args =
+ polymorphize(self.tcx, ty::InstanceDef::Item(def_id), args);
+ if args == polymorphized_args {
ty
} else {
- Ty::new_closure(self.tcx, def_id, polymorphized_substs)
+ Ty::new_closure(self.tcx, def_id, polymorphized_args)
}
}
- ty::Generator(def_id, substs, movability) => {
- let polymorphized_substs =
- polymorphize(self.tcx, ty::InstanceDef::Item(def_id), substs);
- if substs == polymorphized_substs {
+ ty::Generator(def_id, args, movability) => {
+ let polymorphized_args =
+ polymorphize(self.tcx, ty::InstanceDef::Item(def_id), args);
+ if args == polymorphized_args {
ty
} else {
- Ty::new_generator(self.tcx, def_id, polymorphized_substs, movability)
+ Ty::new_generator(self.tcx, def_id, polymorphized_args, movability)
}
}
_ => ty.super_fold_with(self),
@@ -697,7 +695,7 @@ fn polymorphize<'tcx>(
}
}
- InternalSubsts::for_item(tcx, def_id, |param, _| {
+ GenericArgs::for_item(tcx, def_id, |param, _| {
let is_unused = unused.is_unused(param.index);
debug!("polymorphize: param={:?} is_unused={:?}", param, is_unused);
match param.kind {
@@ -706,7 +704,7 @@ fn polymorphize<'tcx>(
// ..and has upvars..
has_upvars &&
// ..and this param has the same type as the tupled upvars..
- upvars_ty == Some(substs[param.index as usize].expect_ty()) => {
+ upvars_ty == Some(args[param.index as usize].expect_ty()) => {
// ..then double-check that polymorphization marked it used..
debug_assert!(!is_unused);
// ..and polymorphize any closures/generators captured as upvars.
@@ -725,7 +723,7 @@ fn polymorphize<'tcx>(
tcx.mk_param_from_def(param),
// Otherwise, use the parameter as before.
- _ => substs[param.index as usize],
+ _ => args[param.index as usize],
}
})
}
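Setting aside the closure/generator upvar special case, the `for_item` call above keeps the given argument for every parameter the body actually uses and falls back to `tcx.mk_param_from_def(param)` (the identity parameter) for unused ones. A toy version over strings (names invented; the real pass works on `GenericArg`s driven by the `unused_generic_params` query):

```rust
// One slot per generic parameter: keep the concrete argument if the body
// uses the parameter, otherwise fall back to the identity parameter.
fn polymorphize(args: &[&str], param_names: &[&str], unused: &[bool]) -> Vec<String> {
    args.iter()
        .zip(param_names)
        .zip(unused)
        .map(|((arg, name), &is_unused)| {
            if is_unused { name.to_string() } else { arg.to_string() }
        })
        .collect()
}

fn main() {
    // The body never touches `U`, so `bool` collapses back to the identity `U`;
    // both instantiations then share a single mono item.
    assert_eq!(polymorphize(&["u32", "bool"], &["T", "U"], &[false, true]), vec!["u32", "U"]);
}
```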
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index d95b05ef7..e362b3477 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -10,7 +10,7 @@ use rustc_hir::def_id::DefId;
use rustc_index::IndexVec;
use rustc_session::config::OptLevel;
use rustc_span::symbol::{sym, Symbol};
-use rustc_span::{Span, DUMMY_SP};
+use rustc_span::{ErrorGuaranteed, Span, DUMMY_SP};
use rustc_target::abi::call::FnAbi;
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
@@ -212,6 +212,7 @@ pub enum LayoutError<'tcx> {
Unknown(Ty<'tcx>),
SizeOverflow(Ty<'tcx>),
NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
+ ReferencesError(ErrorGuaranteed),
Cycle,
}
@@ -224,6 +225,7 @@ impl<'tcx> LayoutError<'tcx> {
SizeOverflow(_) => middle_values_too_big,
NormalizationFailure(_, _) => middle_cannot_be_normalized,
Cycle => middle_cycle,
+ ReferencesError(_) => middle_layout_references_error,
}
}
@@ -237,6 +239,7 @@ impl<'tcx> LayoutError<'tcx> {
E::NormalizationFailure { ty, failure_ty: e.get_type_for_failure() }
}
Cycle => E::Cycle,
+ ReferencesError(_) => E::ReferencesError,
}
}
}
@@ -246,9 +249,9 @@ impl<'tcx> LayoutError<'tcx> {
impl<'tcx> fmt::Display for LayoutError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
- LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
+ LayoutError::Unknown(ty) => write!(f, "the type `{ty}` has an unknown layout"),
LayoutError::SizeOverflow(ty) => {
- write!(f, "values of the type `{}` are too big for the current architecture", ty)
+ write!(f, "values of the type `{ty}` are too big for the current architecture")
}
LayoutError::NormalizationFailure(t, e) => write!(
f,
@@ -257,6 +260,7 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> {
e.get_type_for_failure()
),
LayoutError::Cycle => write!(f, "a cycle occurred during layout computation"),
+ LayoutError::ReferencesError(_) => write!(f, "the type has an unknown layout"),
}
}
}
@@ -323,7 +327,8 @@ impl<'tcx> SizeSkeleton<'tcx> {
Err(
e @ LayoutError::Cycle
| e @ LayoutError::SizeOverflow(_)
- | e @ LayoutError::NormalizationFailure(..),
+ | e @ LayoutError::NormalizationFailure(..)
+ | e @ LayoutError::ReferencesError(_),
) => return Err(e),
};
@@ -374,7 +379,7 @@ impl<'tcx> SizeSkeleton<'tcx> {
}
}
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
// Only newtypes and enums w/ nullable pointer optimization.
if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
return Err(err);
@@ -385,7 +390,7 @@ impl<'tcx> SizeSkeleton<'tcx> {
let i = VariantIdx::from_usize(i);
let fields =
def.variant(i).fields.iter().map(|field| {
- SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
+ SizeSkeleton::compute(field.ty(tcx, args), tcx, param_env)
});
let mut ptr = None;
for field in fields {
@@ -741,9 +746,9 @@ where
let fields = match this.ty.kind() {
ty::Adt(def, _) if def.variants().is_empty() =>
- bug!("for_variant called on zero-variant enum"),
+ bug!("for_variant called on zero-variant enum {}", this.ty),
ty::Adt(def, _) => def.variant(variant_index).fields.len(),
- _ => bug!(),
+ _ => bug!("`ty_and_layout_for_variant` on unexpected type {}", this.ty),
};
tcx.mk_layout(LayoutS {
variants: Variants::Single { index: variant_index },
@@ -755,6 +760,8 @@ where
largest_niche: None,
align: tcx.data_layout.i8_align,
size: Size::ZERO,
+ max_repr_align: None,
+ unadjusted_abi_align: tcx.data_layout.i8_align.abi,
})
}
@@ -861,9 +868,9 @@ where
// offers better information than `std::ptr::metadata::VTable`,
// and we rely on this layout information to trigger a panic in
// `std::mem::uninitialized::<&dyn Trait>()`, for example.
- if let ty::Adt(def, substs) = metadata.kind()
+ if let ty::Adt(def, args) = metadata.kind()
&& Some(def.did()) == tcx.lang_items().dyn_metadata()
- && substs.type_at(0).is_trait()
+ && args.type_at(0).is_trait()
{
mk_dyn_vtable()
} else {
@@ -885,16 +892,15 @@ where
ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
// Tuples, generators and closures.
- ty::Closure(_, ref substs) => field_ty_or_layout(
- TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
+ ty::Closure(_, ref args) => field_ty_or_layout(
+ TyAndLayout { ty: args.as_closure().tupled_upvars_ty(), ..this },
cx,
i,
),
- ty::Generator(def_id, ref substs, _) => match this.variants {
+ ty::Generator(def_id, ref args, _) => match this.variants {
Variants::Single { index } => TyMaybeWithLayout::Ty(
- substs
- .as_generator()
+ args.as_generator()
.state_tys(def_id, tcx)
.nth(index.as_usize())
.unwrap()
@@ -905,18 +911,18 @@ where
if i == tag_field {
return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
}
- TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
+ TyMaybeWithLayout::Ty(args.as_generator().prefix_tys()[i])
}
},
ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
// ADTs.
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
match this.variants {
Variants::Single { index } => {
let field = &def.variant(index).fields[FieldIdx::from_usize(i)];
- TyMaybeWithLayout::Ty(field.ty(tcx, substs))
+ TyMaybeWithLayout::Ty(field.ty(tcx, args))
}
// Discriminant field for enums (where applicable).
@@ -1233,6 +1239,8 @@ pub fn fn_can_unwind(tcx: TyCtxt<'_>, fn_def_id: Option<DefId>, abi: SpecAbi) ->
| EfiApi
| AvrInterrupt
| AvrNonBlockingInterrupt
+ | RiscvInterruptM
+ | RiscvInterruptS
| CCmseNonSecureCall
| Wasm
| PlatformIntrinsic
diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs
index 71911a5a6..7a32cfb10 100644
--- a/compiler/rustc_middle/src/ty/list.rs
+++ b/compiler/rustc_middle/src/ty/list.rs
@@ -1,6 +1,7 @@
use crate::arena::Arena;
use rustc_data_structures::aligned::{align_of, Aligned};
use rustc_serialize::{Encodable, Encoder};
+use rustc_type_ir::{InferCtxtLike, OptWithInfcx};
use std::alloc::Layout;
use std::cmp::Ordering;
use std::fmt;
@@ -119,6 +120,14 @@ impl<T: fmt::Debug> fmt::Debug for List<T> {
(**self).fmt(f)
}
}
+impl<'tcx, T: super::DebugWithInfcx<TyCtxt<'tcx>>> super::DebugWithInfcx<TyCtxt<'tcx>> for List<T> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ fmt::Debug::fmt(&this.map(|this| this.as_slice()), f)
+ }
+}
impl<S: Encoder, T: Encodable<S>> Encodable<S> for List<T> {
#[inline]
@@ -202,6 +211,8 @@ unsafe impl<T: Sync> Sync for List<T> {}
// We need this since `List` uses extern type `OpaqueListContents`.
#[cfg(parallel_compiler)]
use rustc_data_structures::sync::DynSync;
+
+use super::TyCtxt;
#[cfg(parallel_compiler)]
unsafe impl<T: DynSync> DynSync for List<T> {}
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index aa8bfd317..1274f427e 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -28,6 +28,7 @@ use crate::ty::fast_reject::SimplifiedType;
use crate::ty::util::Discr;
pub use adt::*;
pub use assoc::*;
+pub use generic_args::*;
pub use generics::*;
use rustc_ast as ast;
use rustc_ast::node_id::NodeMap;
@@ -53,7 +54,7 @@ use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{ExpnId, ExpnKind, Span};
use rustc_target::abi::{Align, FieldIdx, Integer, IntegerType, VariantIdx};
pub use rustc_target::abi::{ReprFlags, ReprOptions};
-pub use subst::*;
+pub use rustc_type_ir::{DebugWithInfcx, InferCtxtLike, OptWithInfcx};
pub use vtable::*;
use std::fmt::Debug;
@@ -81,8 +82,7 @@ pub use self::binding::BindingMode::*;
pub use self::closure::{
is_ancestor_or_same_capture, place_to_string_for_capture, BorrowKind, CaptureInfo,
CapturedPlace, ClosureKind, ClosureTypeInfo, MinCaptureInformationMap, MinCaptureList,
- RootVariableMinCaptureList, UpvarCapture, UpvarCaptureMap, UpvarId, UpvarListMap, UpvarPath,
- CAPTURE_STRUCT_LOCAL,
+ RootVariableMinCaptureList, UpvarCapture, UpvarId, UpvarPath, CAPTURE_STRUCT_LOCAL,
};
pub use self::consts::{
Const, ConstData, ConstInt, Expr, InferConst, ScalarInt, UnevaluatedConst, ValTree,
@@ -97,12 +97,12 @@ pub use self::rvalue_scopes::RvalueScopes;
pub use self::sty::BoundRegionKind::*;
pub use self::sty::{
AliasTy, Article, Binder, BoundRegion, BoundRegionKind, BoundTy, BoundTyKind, BoundVar,
- BoundVariableKind, CanonicalPolyFnSig, ClosureSubsts, ClosureSubstsParts, ConstKind, ConstVid,
+ BoundVariableKind, CanonicalPolyFnSig, ClosureArgs, ClosureArgsParts, ConstKind, ConstVid,
EarlyBoundRegion, ExistentialPredicate, ExistentialProjection, ExistentialTraitRef, FnSig,
- FreeRegion, GenSig, GeneratorSubsts, GeneratorSubstsParts, InlineConstSubsts,
- InlineConstSubstsParts, ParamConst, ParamTy, PolyExistentialPredicate,
- PolyExistentialProjection, PolyExistentialTraitRef, PolyFnSig, PolyGenSig, PolyTraitRef,
- Region, RegionKind, RegionVid, TraitRef, TyKind, TypeAndMut, UpvarSubsts, VarianceDiagInfo,
+ FreeRegion, GenSig, GeneratorArgs, GeneratorArgsParts, InlineConstArgs, InlineConstArgsParts,
+ ParamConst, ParamTy, PolyExistentialPredicate, PolyExistentialProjection,
+ PolyExistentialTraitRef, PolyFnSig, PolyGenSig, PolyTraitRef, Region, RegionKind, RegionVid,
+ TraitRef, TyKind, TypeAndMut, UpvarArgs, VarianceDiagInfo,
};
pub use self::trait_def::TraitDef;
pub use self::typeck_results::{
@@ -126,7 +126,6 @@ pub mod layout;
pub mod normalize_erasing_regions;
pub mod print;
pub mod relate;
-pub mod subst;
pub mod trait_def;
pub mod util;
pub mod visit;
@@ -140,6 +139,7 @@ mod consts;
mod context;
mod diagnostics;
mod erase_regions;
+mod generic_args;
mod generics;
mod impls_ty;
mod instance;
@@ -148,7 +148,7 @@ mod opaque_types;
mod parameterized;
mod rvalue_scopes;
mod structural_impls;
-#[cfg_attr(not(bootstrap), allow(hidden_glob_reexports))]
+#[allow(hidden_glob_reexports)]
mod sty;
mod typeck_results;
@@ -238,7 +238,7 @@ pub struct ImplHeader<'tcx> {
pub impl_def_id: DefId,
pub self_ty: Ty<'tcx>,
pub trait_ref: Option<TraitRef<'tcx>>,
- pub predicates: Vec<Predicate<'tcx>>,
+ pub predicates: Vec<(Predicate<'tcx>, Span)>,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, TypeFoldable, TypeVisitable)]
@@ -355,8 +355,8 @@ impl TyCtxt<'_> {
#[inline]
#[track_caller]
- pub fn local_parent(self, id: LocalDefId) -> LocalDefId {
- self.parent(id.to_def_id()).expect_local()
+ pub fn local_parent(self, id: impl Into<LocalDefId>) -> LocalDefId {
+ self.parent(id.into().to_def_id()).expect_local()
}
pub fn is_descendant_of(self, mut descendant: DefId, ancestor: DefId) -> bool {
@@ -498,11 +498,9 @@ impl<'tcx> Predicate<'tcx> {
.map_bound(|kind| match kind {
PredicateKind::Clause(ClauseKind::Trait(TraitPredicate {
trait_ref,
- constness,
polarity,
})) => Some(PredicateKind::Clause(ClauseKind::Trait(TraitPredicate {
trait_ref,
- constness,
polarity: polarity.flip()?,
}))),
@@ -513,19 +511,6 @@ impl<'tcx> Predicate<'tcx> {
Some(tcx.mk_predicate(kind))
}
- pub fn without_const(mut self, tcx: TyCtxt<'tcx>) -> Self {
- if let PredicateKind::Clause(ClauseKind::Trait(TraitPredicate { trait_ref, constness, polarity })) = self.kind().skip_binder()
- && constness != BoundConstness::NotConst
- {
- self = tcx.mk_predicate(self.kind().rebind(PredicateKind::Clause(ClauseKind::Trait(TraitPredicate {
- trait_ref,
- constness: BoundConstness::NotConst,
- polarity,
- }))));
- }
- self
- }
-
#[instrument(level = "debug", skip(tcx), ret)]
pub fn is_coinductive(self, tcx: TyCtxt<'tcx>) -> bool {
match self.kind().skip_binder() {
@@ -629,10 +614,6 @@ impl<'tcx> Clause<'tcx> {
None
}
}
-
- pub fn without_const(self, tcx: TyCtxt<'tcx>) -> Clause<'tcx> {
- self.as_predicate().without_const(tcx).expect_clause()
- }
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
@@ -676,9 +657,9 @@ pub enum PredicateKind<'tcx> {
ObjectSafe(DefId),
/// No direct syntax. May be thought of as `where T: FnFoo<...>`
- /// for some substitutions `...` and `T` being a closure type.
+ /// for some generic args `...` and `T` being a closure type.
/// Satisfied (or refuted) once we know the closure's kind.
- ClosureKind(DefId, SubstsRef<'tcx>, ClosureKind),
+ ClosureKind(DefId, GenericArgsRef<'tcx>, ClosureKind),
/// `T1 <: T2`
///
@@ -813,15 +794,15 @@ impl<'tcx> Clause<'tcx> {
// this trick achieves that).
// Working through the second example:
- // trait_ref: for<'x> T: Foo1<'^0.0>; substs: [T, '^0.0]
- // predicate: for<'b> Self: Bar1<'a, '^0.0>; substs: [Self, 'a, '^0.0]
+ // trait_ref: for<'x> T: Foo1<'^0.0>; args: [T, '^0.0]
+ // predicate: for<'b> Self: Bar1<'a, '^0.0>; args: [Self, 'a, '^0.0]
// We want to end up with:
// for<'x, 'b> T: Bar1<'^0.0, '^0.1>
// To do this:
// 1) We must shift all bound vars in predicate by the length
// of trait ref's bound vars. So, we would end up with predicate like
// Self: Bar1<'a, '^0.1>
- // 2) We can then apply the trait substs to this, ending up with
+ // 2) We can then apply the trait args to this, ending up with
// T: Bar1<'^0.0, '^0.1>
// 3) Finally, to create the final bound vars, we concatenate the bound
// vars of the trait ref with those of the predicate:
@@ -833,7 +814,7 @@ impl<'tcx> Clause<'tcx> {
let shifted_pred =
tcx.shift_bound_var_indices(trait_bound_vars.len(), bound_pred.skip_binder());
// 2) Self: Bar1<'a, '^0.1> -> T: Bar1<'^0.0, '^0.1>
- let new = EarlyBinder::bind(shifted_pred).subst(tcx, trait_ref.skip_binder().substs);
+ let new = EarlyBinder::bind(shifted_pred).instantiate(tcx, trait_ref.skip_binder().args);
// 3) ['x] + ['b] -> ['x, 'b]
let bound_vars =
tcx.mk_bound_variable_kinds_from_iter(trait_bound_vars.iter().chain(pred_bound_vars));
@@ -852,8 +833,6 @@ impl<'tcx> Clause<'tcx> {
pub struct TraitPredicate<'tcx> {
pub trait_ref: TraitRef<'tcx>,
- pub constness: BoundConstness,
-
/// If polarity is Positive: we are proving that the trait is implemented.
///
/// If polarity is Negative: we are proving that a negative impl of this trait
@@ -867,20 +846,6 @@ pub struct TraitPredicate<'tcx> {
pub type PolyTraitPredicate<'tcx> = ty::Binder<'tcx, TraitPredicate<'tcx>>;
impl<'tcx> TraitPredicate<'tcx> {
- pub fn remap_constness(&mut self, param_env: &mut ParamEnv<'tcx>) {
- *param_env = param_env.with_constness(self.constness.and(param_env.constness()))
- }
-
- /// Remap the constness of this predicate before emitting it for diagnostics.
- pub fn remap_constness_diag(&mut self, param_env: ParamEnv<'tcx>) {
- // this is different to `remap_constness` that callees want to print this predicate
- // in case of selection errors. `T: ~const Drop` bounds cannot end up here when the
- // param_env is not const because it is always satisfied in non-const contexts.
- if let hir::Constness::NotConst = param_env.constness() {
- self.constness = ty::BoundConstness::NotConst;
- }
- }
-
pub fn with_self_ty(self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> Self {
Self { trait_ref: self.trait_ref.with_self_ty(tcx, self_ty), ..self }
}
@@ -892,24 +857,6 @@ impl<'tcx> TraitPredicate<'tcx> {
pub fn self_ty(self) -> Ty<'tcx> {
self.trait_ref.self_ty()
}
-
- #[inline]
- pub fn is_const_if_const(self) -> bool {
- self.constness == BoundConstness::ConstIfConst
- }
-
- pub fn is_constness_satisfied_by(self, constness: hir::Constness) -> bool {
- match (self.constness, constness) {
- (BoundConstness::NotConst, _)
- | (BoundConstness::ConstIfConst, hir::Constness::Const) => true,
- (BoundConstness::ConstIfConst, hir::Constness::NotConst) => false,
- }
- }
-
- pub fn without_const(mut self) -> Self {
- self.constness = BoundConstness::NotConst;
- self
- }
}
impl<'tcx> PolyTraitPredicate<'tcx> {
@@ -922,19 +869,6 @@ impl<'tcx> PolyTraitPredicate<'tcx> {
self.map_bound(|trait_ref| trait_ref.self_ty())
}
- /// Remap the constness of this predicate before emitting it for diagnostics.
- pub fn remap_constness_diag(&mut self, param_env: ParamEnv<'tcx>) {
- *self = self.map_bound(|mut p| {
- p.remap_constness_diag(param_env);
- p
- });
- }
-
- #[inline]
- pub fn is_const_if_const(self) -> bool {
- self.skip_binder().is_const_if_const()
- }
-
#[inline]
pub fn polarity(self) -> ImplPolarity {
self.skip_binder().polarity
@@ -980,9 +914,9 @@ pub struct Term<'tcx> {
impl Debug for Term<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let data = if let Some(ty) = self.ty() {
- format!("Term::Ty({:?})", ty)
+ format!("Term::Ty({ty:?})")
} else if let Some(ct) = self.ct() {
- format!("Term::Ct({:?})", ct)
+ format!("Term::Ct({ct:?})")
} else {
unreachable!()
};
@@ -1079,7 +1013,7 @@ impl<'tcx> Term<'tcx> {
_ => None,
},
TermKind::Const(ct) => match ct.kind() {
- ConstKind::Unevaluated(uv) => Some(tcx.mk_alias_ty(uv.def, uv.substs)),
+ ConstKind::Unevaluated(uv) => Some(tcx.mk_alias_ty(uv.def, uv.args)),
_ => None,
},
}
@@ -1309,7 +1243,7 @@ impl<'tcx> ToPredicate<'tcx> for TraitRef<'tcx> {
impl<'tcx> ToPredicate<'tcx, TraitPredicate<'tcx>> for TraitRef<'tcx> {
#[inline(always)]
fn to_predicate(self, _tcx: TyCtxt<'tcx>) -> TraitPredicate<'tcx> {
- self.without_const()
+ TraitPredicate { trait_ref: self, polarity: ImplPolarity::Positive }
}
}
@@ -1350,7 +1284,6 @@ impl<'tcx> ToPredicate<'tcx, PolyTraitPredicate<'tcx>> for Binder<'tcx, TraitRef
fn to_predicate(self, _: TyCtxt<'tcx>) -> PolyTraitPredicate<'tcx> {
self.map_bound(|trait_ref| TraitPredicate {
trait_ref,
- constness: ty::BoundConstness::NotConst,
polarity: ty::ImplPolarity::Positive,
})
}
@@ -1381,24 +1314,49 @@ impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for PolyTraitPredicate<'tcx> {
}
}
+impl<'tcx> ToPredicate<'tcx> for OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ ty::Binder::dummy(PredicateKind::Clause(ClauseKind::RegionOutlives(self))).to_predicate(tcx)
+ }
+}
+
impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
self.map_bound(|p| PredicateKind::Clause(ClauseKind::RegionOutlives(p))).to_predicate(tcx)
}
}
+impl<'tcx> ToPredicate<'tcx> for OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ ty::Binder::dummy(PredicateKind::Clause(ClauseKind::TypeOutlives(self))).to_predicate(tcx)
+ }
+}
+
impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
self.map_bound(|p| PredicateKind::Clause(ClauseKind::TypeOutlives(p))).to_predicate(tcx)
}
}
+impl<'tcx> ToPredicate<'tcx> for ProjectionPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ ty::Binder::dummy(PredicateKind::Clause(ClauseKind::Projection(self))).to_predicate(tcx)
+ }
+}
+
impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
self.map_bound(|p| PredicateKind::Clause(ClauseKind::Projection(p))).to_predicate(tcx)
}
}
+impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for ProjectionPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Clause<'tcx> {
+ let p: Predicate<'tcx> = self.to_predicate(tcx);
+ p.expect_clause()
+ }
+}
+
impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for PolyProjectionPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Clause<'tcx> {
let p: Predicate<'tcx> = self.to_predicate(tcx);
@@ -1558,7 +1516,7 @@ impl<'a, 'tcx> IntoIterator for &'a InstantiatedPredicates<'tcx> {
#[derive(TypeFoldable, TypeVisitable)]
pub struct OpaqueTypeKey<'tcx> {
pub def_id: LocalDefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
}
#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, HashStable, TyEncodable, TyDecodable)]
@@ -1629,21 +1587,21 @@ impl<'tcx> OpaqueHiddenType<'tcx> {
// typeck errors have subpar spans for opaque types, so delay error reporting until borrowck.
ignore_errors: bool,
) -> Self {
- let OpaqueTypeKey { def_id, substs } = opaque_type_key;
+ let OpaqueTypeKey { def_id, args } = opaque_type_key;
- // Use substs to build up a reverse map from regions to their
+ // Use args to build up a reverse map from regions to their
// identity mappings. This is necessary because `impl
// Trait` lifetimes are computed by replacing existing
// lifetimes with 'static and remapping only those used in the
// `impl Trait` return type, resulting in the parameters
// shifting.
- let id_substs = InternalSubsts::identity_for_item(tcx, def_id);
- debug!(?id_substs);
+ let id_args = GenericArgs::identity_for_item(tcx, def_id);
+ debug!(?id_args);
- // This zip may have several times the same lifetime in `substs` paired with a different
- // lifetime from `id_substs`. Simply `collect`ing the iterator is the correct behaviour:
+ // This zip may have several times the same lifetime in `args` paired with a different
+ // lifetime from `id_args`. Simply `collect`ing the iterator is the correct behaviour:
// it will pick the last one, which is the one we introduced in the impl-trait desugaring.
- let map = substs.iter().zip(id_substs).collect();
+ let map = args.iter().zip(id_args).collect();
debug!("map = {:#?}", map);
// Convert the type from the function into a type valid outside
@@ -1700,15 +1658,12 @@ pub struct ParamEnv<'tcx> {
#[derive(Copy, Clone)]
struct ParamTag {
reveal: traits::Reveal,
- constness: hir::Constness,
}
impl_tag! {
impl Tag for ParamTag;
- ParamTag { reveal: traits::Reveal::UserFacing, constness: hir::Constness::NotConst },
- ParamTag { reveal: traits::Reveal::All, constness: hir::Constness::NotConst },
- ParamTag { reveal: traits::Reveal::UserFacing, constness: hir::Constness::Const },
- ParamTag { reveal: traits::Reveal::All, constness: hir::Constness::Const },
+ ParamTag { reveal: traits::Reveal::UserFacing },
+ ParamTag { reveal: traits::Reveal::All },
}
impl<'tcx> fmt::Debug for ParamEnv<'tcx> {
@@ -1716,7 +1671,6 @@ impl<'tcx> fmt::Debug for ParamEnv<'tcx> {
f.debug_struct("ParamEnv")
.field("caller_bounds", &self.caller_bounds())
.field("reveal", &self.reveal())
- .field("constness", &self.constness())
.finish()
}
}
@@ -1725,7 +1679,6 @@ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ParamEnv<'tcx> {
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
self.caller_bounds().hash_stable(hcx, hasher);
self.reveal().hash_stable(hcx, hasher);
- self.constness().hash_stable(hcx, hasher);
}
}
@@ -1737,7 +1690,6 @@ impl<'tcx> TypeFoldable<TyCtxt<'tcx>> for ParamEnv<'tcx> {
Ok(ParamEnv::new(
self.caller_bounds().try_fold_with(folder)?,
self.reveal().try_fold_with(folder)?,
- self.constness(),
))
}
}
@@ -1756,7 +1708,7 @@ impl<'tcx> ParamEnv<'tcx> {
/// type-checking.
#[inline]
pub fn empty() -> Self {
- Self::new(List::empty(), Reveal::UserFacing, hir::Constness::NotConst)
+ Self::new(List::empty(), Reveal::UserFacing)
}
#[inline]
@@ -1769,16 +1721,6 @@ impl<'tcx> ParamEnv<'tcx> {
self.packed.tag().reveal
}
- #[inline]
- pub fn constness(self) -> hir::Constness {
- self.packed.tag().constness
- }
-
- #[inline]
- pub fn is_const(self) -> bool {
- self.packed.tag().constness == hir::Constness::Const
- }
-
/// Construct a trait environment with no where-clauses in scope
/// where the values of all `impl Trait` and other hidden types
/// are revealed. This is suitable for monomorphized, post-typeck
@@ -1788,17 +1730,13 @@ impl<'tcx> ParamEnv<'tcx> {
/// or invoke `param_env.with_reveal_all()`.
#[inline]
pub fn reveal_all() -> Self {
- Self::new(List::empty(), Reveal::All, hir::Constness::NotConst)
+ Self::new(List::empty(), Reveal::All)
}
/// Construct a trait environment with the given set of predicates.
#[inline]
- pub fn new(
- caller_bounds: &'tcx List<Clause<'tcx>>,
- reveal: Reveal,
- constness: hir::Constness,
- ) -> Self {
- ty::ParamEnv { packed: CopyTaggedPtr::new(caller_bounds, ParamTag { reveal, constness }) }
+ pub fn new(caller_bounds: &'tcx List<Clause<'tcx>>, reveal: Reveal) -> Self {
+ ty::ParamEnv { packed: CopyTaggedPtr::new(caller_bounds, ParamTag { reveal }) }
}
pub fn with_user_facing(mut self) -> Self {
@@ -1806,29 +1744,6 @@ impl<'tcx> ParamEnv<'tcx> {
self
}
- #[inline]
- pub fn with_constness(mut self, constness: hir::Constness) -> Self {
- self.packed.set_tag(ParamTag { constness, ..self.packed.tag() });
- self
- }
-
- #[inline]
- pub fn with_const(mut self) -> Self {
- self.packed.set_tag(ParamTag { constness: hir::Constness::Const, ..self.packed.tag() });
- self
- }
-
- #[inline]
- pub fn without_const(mut self) -> Self {
- self.packed.set_tag(ParamTag { constness: hir::Constness::NotConst, ..self.packed.tag() });
- self
- }
-
- #[inline]
- pub fn remap_constness_with(&mut self, mut constness: ty::BoundConstness) {
- *self = self.with_constness(constness.and(self.constness()))
- }
-
/// Returns a new parameter environment with the same clauses, but
/// which "reveals" the true results of projections in all cases
/// (even for associated types that are specializable). This is
@@ -1843,17 +1758,13 @@ impl<'tcx> ParamEnv<'tcx> {
return self;
}
- ParamEnv::new(
- tcx.reveal_opaque_types_in_bounds(self.caller_bounds()),
- Reveal::All,
- self.constness(),
- )
+ ParamEnv::new(tcx.reveal_opaque_types_in_bounds(self.caller_bounds()), Reveal::All)
}
/// Returns this same environment but with no caller bounds.
#[inline]
pub fn without_caller_bounds(self) -> Self {
- Self::new(List::empty(), self.reveal(), self.constness())
+ Self::new(List::empty(), self.reveal())
}
/// Creates a suitable environment in which to perform trait
@@ -1883,24 +1794,6 @@ impl<'tcx> ParamEnv<'tcx> {
}
}
-// FIXME(ecstaticmorse): Audit all occurrences of `without_const().to_predicate(tcx)` to ensure that
-// the constness of trait bounds is being propagated correctly.
-impl<'tcx> PolyTraitRef<'tcx> {
- #[inline]
- pub fn with_constness(self, constness: BoundConstness) -> PolyTraitPredicate<'tcx> {
- self.map_bound(|trait_ref| ty::TraitPredicate {
- trait_ref,
- constness,
- polarity: ty::ImplPolarity::Positive,
- })
- }
-
- #[inline]
- pub fn without_const(self) -> PolyTraitPredicate<'tcx> {
- self.with_constness(BoundConstness::NotConst)
- }
-}
-
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)]
#[derive(HashStable, Lift)]
pub struct ParamEnvAnd<'tcx, T> {
@@ -2164,10 +2057,10 @@ impl Hash for FieldDef {
}
impl<'tcx> FieldDef {
- /// Returns the type of this field. The resulting type is not normalized. The `subst` is
+ /// Returns the type of this field. The resulting type is not normalized. The `arg` is
/// typically obtained via the second field of [`TyKind::Adt`].
- pub fn ty(&self, tcx: TyCtxt<'tcx>, subst: SubstsRef<'tcx>) -> Ty<'tcx> {
- tcx.type_of(self.did).subst(tcx, subst)
+ pub fn ty(&self, tcx: TyCtxt<'tcx>, arg: GenericArgsRef<'tcx>) -> Ty<'tcx> {
+ tcx.type_of(self.did).instantiate(tcx, arg)
}
/// Computes the `Ident` of this variant by looking up the `Span`
@@ -2391,8 +2284,8 @@ impl<'tcx> TyCtxt<'tcx> {
let impl_trait_ref2 = self.impl_trait_ref(def_id2);
// If either trait impl references an error, they're allowed to overlap,
// as one of them essentially doesn't exist.
- if impl_trait_ref1.is_some_and(|tr| tr.subst_identity().references_error())
- || impl_trait_ref2.is_some_and(|tr| tr.subst_identity().references_error())
+ if impl_trait_ref1.is_some_and(|tr| tr.instantiate_identity().references_error())
+ || impl_trait_ref2.is_some_and(|tr| tr.instantiate_identity().references_error())
{
return Some(ImplOverlapKind::Permitted { marker: false });
}
@@ -2682,20 +2575,6 @@ impl<'tcx> TyCtxt<'tcx> {
matches!(self.trait_of_item(def_id), Some(trait_id) if self.has_attr(trait_id, sym::const_trait))
}
- pub fn impl_trait_in_trait_parent_fn(self, mut def_id: DefId) -> DefId {
- match self.opt_rpitit_info(def_id) {
- Some(ImplTraitInTraitData::Trait { fn_def_id, .. })
- | Some(ImplTraitInTraitData::Impl { fn_def_id, .. }) => fn_def_id,
- None => {
- while let def_kind = self.def_kind(def_id) && def_kind != DefKind::AssocFn {
- debug_assert_eq!(def_kind, DefKind::ImplTraitPlaceholder);
- def_id = self.parent(def_id);
- }
- def_id
- }
- }
- }
-
/// Returns the `DefId` of the item within which the `impl Trait` is declared.
/// For type-alias-impl-trait this is the `type` alias.
/// For impl-trait-in-assoc-type this is the assoc type.
@@ -2713,33 +2592,20 @@ impl<'tcx> TyCtxt<'tcx> {
return false;
}
- let Some(item) = self.opt_associated_item(def_id) else { return false; };
+ let Some(item) = self.opt_associated_item(def_id) else {
+ return false;
+ };
if item.container != ty::AssocItemContainer::ImplContainer {
return false;
}
- let Some(trait_item_def_id) = item.trait_item_def_id else { return false; };
-
- if self.lower_impl_trait_in_trait_to_assoc_ty() {
- return !self
- .associated_types_for_impl_traits_in_associated_fn(trait_item_def_id)
- .is_empty();
- }
+ let Some(trait_item_def_id) = item.trait_item_def_id else {
+ return false;
+ };
- // FIXME(RPITIT): This does a somewhat manual walk through the signature
- // of the trait fn to look for any RPITITs, but that's kinda doing a lot
- // of work. We can probably remove this when we refactor RPITITs to be
- // associated types.
- self.fn_sig(trait_item_def_id).subst_identity().skip_binder().output().walk().any(|arg| {
- if let ty::GenericArgKind::Type(ty) = arg.unpack()
- && let ty::Alias(ty::Projection, data) = ty.kind()
- && self.def_kind(data.def_id) == DefKind::ImplTraitPlaceholder
- {
- true
- } else {
- false
- }
- })
+ return !self
+ .associated_types_for_impl_traits_in_associated_fn(trait_item_def_id)
+ .is_empty();
}
}
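
The ty/mod.rs changes that most affect callers are the removal of constness from `TraitPredicate`/`ParamEnv` and the new `ToPredicate` impls for non-binder predicates. A minimal sketch of how a caller adapts, assuming the usual `rustc_middle` imports; the wrapper functions are illustrative, not part of this patch:

    use rustc_middle::traits::Reveal;
    use rustc_middle::ty::{self, ParamEnv, Predicate, Region, ToPredicate, Ty, TyCtxt};

    // `ParamEnv::new` now carries only caller bounds and a `Reveal` mode;
    // the old `hir::Constness` argument is gone.
    fn user_facing_env<'tcx>(bounds: &'tcx ty::List<ty::Clause<'tcx>>) -> ParamEnv<'tcx> {
        ParamEnv::new(bounds, Reveal::UserFacing)
    }

    // Non-binder outlives predicates can be lifted to `Predicate` directly;
    // the new impl wraps them in a dummy binder.
    fn type_outlives<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, r: Region<'tcx>) -> Predicate<'tcx> {
        ty::OutlivesPredicate(ty, r).to_predicate(tcx)
    }

`TraitPredicate` itself is now just `{ trait_ref, polarity }`, which is why the `ToPredicate` impl for `TraitRef` above constructs it inline.
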
diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
index a0c8d299f..2415d50b2 100644
--- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
+++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
@@ -9,7 +9,7 @@
use crate::traits::query::NoSolution;
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder};
-use crate::ty::{self, EarlyBinder, SubstsRef, Ty, TyCtxt, TypeVisitableExt};
+use crate::ty::{self, EarlyBinder, GenericArgsRef, Ty, TyCtxt, TypeVisitableExt};
#[derive(Debug, Copy, Clone, HashStable, TyEncodable, TyDecodable)]
pub enum NormalizationError<'tcx> {
@@ -20,8 +20,8 @@ pub enum NormalizationError<'tcx> {
impl<'tcx> NormalizationError<'tcx> {
pub fn get_type_for_failure(&self) -> String {
match self {
- NormalizationError::Type(t) => format!("{}", t),
- NormalizationError::Const(c) => format!("{}", c),
+ NormalizationError::Type(t) => format!("{t}"),
+ NormalizationError::Const(c) => format!("{c}"),
}
}
}
@@ -137,7 +137,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// use `try_subst_and_normalize_erasing_regions` instead.
pub fn subst_and_normalize_erasing_regions<T>(
self,
- param_substs: SubstsRef<'tcx>,
+ param_args: GenericArgsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: EarlyBinder<T>,
) -> T
@@ -146,12 +146,12 @@ impl<'tcx> TyCtxt<'tcx> {
{
debug!(
"subst_and_normalize_erasing_regions(\
- param_substs={:?}, \
+ param_args={:?}, \
value={:?}, \
param_env={:?})",
- param_substs, value, param_env,
+ param_args, value, param_env,
);
- let substituted = value.subst(self, param_substs);
+ let substituted = value.instantiate(self, param_args);
self.normalize_erasing_regions(param_env, substituted)
}
@@ -161,7 +161,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// not assume that normalization succeeds.
pub fn try_subst_and_normalize_erasing_regions<T>(
self,
- param_substs: SubstsRef<'tcx>,
+ param_args: GenericArgsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: EarlyBinder<T>,
) -> Result<T, NormalizationError<'tcx>>
@@ -170,12 +170,12 @@ impl<'tcx> TyCtxt<'tcx> {
{
debug!(
"subst_and_normalize_erasing_regions(\
- param_substs={:?}, \
+ param_args={:?}, \
value={:?}, \
param_env={:?})",
- param_substs, value, param_env,
+ param_args, value, param_env,
);
- let substituted = value.subst(self, param_substs);
+ let substituted = value.instantiate(self, param_args);
self.try_normalize_erasing_regions(param_env, substituted)
}
}
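
Only the parameter name (`param_substs` → `param_args`) and the `EarlyBinder::subst` → `instantiate` call change here; the entry points keep their current names. A hypothetical call site, with identifiers chosen purely for illustration:

    use rustc_middle::ty::{self, EarlyBinder, GenericArgsRef, Ty, TyCtxt};

    // Instantiate an early-bound type with `args`, then erase regions and
    // normalize, going through the renamed `param_args` parameter above.
    fn monomorphized_ty<'tcx>(
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        args: GenericArgsRef<'tcx>,
        value: EarlyBinder<Ty<'tcx>>,
    ) -> Ty<'tcx> {
        tcx.subst_and_normalize_erasing_regions(args, param_env, value)
    }
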
diff --git a/compiler/rustc_middle/src/ty/opaque_types.rs b/compiler/rustc_middle/src/ty/opaque_types.rs
index b10921eff..0ff5ac903 100644
--- a/compiler/rustc_middle/src/ty/opaque_types.rs
+++ b/compiler/rustc_middle/src/ty/opaque_types.rs
@@ -1,7 +1,7 @@
use crate::error::ConstNotUsedTraitAlias;
use crate::ty::fold::{TypeFolder, TypeSuperFoldable};
-use crate::ty::subst::{GenericArg, GenericArgKind};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable};
+use crate::ty::{GenericArg, GenericArgKind};
use rustc_data_structures::fx::FxHashMap;
use rustc_span::def_id::DefId;
use rustc_span::Span;
@@ -49,11 +49,11 @@ impl<'tcx> ReverseMapper<'tcx> {
kind.fold_with(self)
}
- fn fold_closure_substs(
+ fn fold_closure_args(
&mut self,
def_id: DefId,
- substs: ty::SubstsRef<'tcx>,
- ) -> ty::SubstsRef<'tcx> {
+ args: ty::GenericArgsRef<'tcx>,
+ ) -> ty::GenericArgsRef<'tcx> {
// I am a horrible monster and I pray for death. When
// we encounter a closure here, it is always a closure
// from within the function that we are currently
@@ -79,7 +79,7 @@ impl<'tcx> ReverseMapper<'tcx> {
// during codegen.
let generics = self.tcx.generics_of(def_id);
- self.tcx.mk_substs_from_iter(substs.iter().enumerate().map(|(index, kind)| {
+ self.tcx.mk_args_from_iter(args.iter().enumerate().map(|(index, kind)| {
if index < generics.parent_count {
// Accommodate missing regions in the parent kinds...
self.fold_kind_no_missing_regions_error(kind)
@@ -124,7 +124,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReverseMapper<'tcx> {
match self.map.get(&r.into()).map(|k| k.unpack()) {
Some(GenericArgKind::Lifetime(r1)) => r1,
- Some(u) => panic!("region mapped to unexpected kind: {:?}", u),
+ Some(u) => panic!("region mapped to unexpected kind: {u:?}"),
None if self.do_not_error => self.tcx.lifetimes.re_static,
None => {
let e = self
@@ -134,9 +134,8 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReverseMapper<'tcx> {
.span_label(
self.span,
format!(
- "lifetime `{}` is part of concrete type but not used in \
- parameter list of the `impl Trait` type alias",
- r
+ "lifetime `{r}` is part of concrete type but not used in \
+ parameter list of the `impl Trait` type alias"
),
)
.emit();
@@ -148,19 +147,19 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReverseMapper<'tcx> {
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
match *ty.kind() {
- ty::Closure(def_id, substs) => {
- let substs = self.fold_closure_substs(def_id, substs);
- Ty::new_closure(self.tcx, def_id, substs)
+ ty::Closure(def_id, args) => {
+ let args = self.fold_closure_args(def_id, args);
+ Ty::new_closure(self.tcx, def_id, args)
}
- ty::Generator(def_id, substs, movability) => {
- let substs = self.fold_closure_substs(def_id, substs);
- Ty::new_generator(self.tcx, def_id, substs, movability)
+ ty::Generator(def_id, args, movability) => {
+ let args = self.fold_closure_args(def_id, args);
+ Ty::new_generator(self.tcx, def_id, args, movability)
}
- ty::GeneratorWitnessMIR(def_id, substs) => {
- let substs = self.fold_closure_substs(def_id, substs);
- Ty::new_generator_witness_mir(self.tcx, def_id, substs)
+ ty::GeneratorWitnessMIR(def_id, args) => {
+ let args = self.fold_closure_args(def_id, args);
+ Ty::new_generator_witness_mir(self.tcx, def_id, args)
}
ty::Param(param) => {
@@ -169,7 +168,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReverseMapper<'tcx> {
// Found it in the substitution list; replace with the parameter from the
// opaque type.
Some(GenericArgKind::Type(t1)) => t1,
- Some(u) => panic!("type mapped to unexpected kind: {:?}", u),
+ Some(u) => panic!("type mapped to unexpected kind: {u:?}"),
None => {
debug!(?param, ?self.map);
if !self.ignore_errors {
@@ -178,9 +177,8 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReverseMapper<'tcx> {
.struct_span_err(
self.span,
format!(
- "type parameter `{}` is part of concrete type but not \
- used in parameter list for the `impl Trait` type alias",
- ty
+ "type parameter `{ty}` is part of concrete type but not \
+ used in parameter list for the `impl Trait` type alias"
),
)
.emit();
@@ -205,7 +203,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReverseMapper<'tcx> {
// Found it in the substitution list, replace with the parameter from the
// opaque type.
Some(GenericArgKind::Const(c1)) => c1,
- Some(u) => panic!("const mapped to unexpected kind: {:?}", u),
+ Some(u) => panic!("const mapped to unexpected kind: {u:?}"),
None => {
let guar = self
.tcx
diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs
index cc2b26a5e..f1c389842 100644
--- a/compiler/rustc_middle/src/ty/parameterized.rs
+++ b/compiler/rustc_middle/src/ty/parameterized.rs
@@ -52,6 +52,7 @@ trivially_parameterized_over_tcx! {
usize,
(),
u32,
+ u64,
bool,
std::string::String,
crate::metadata::ModChild,
diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs
index 2de0a3f75..05871d0bc 100644
--- a/compiler/rustc_middle/src/ty/print/mod.rs
+++ b/compiler/rustc_middle/src/ty/print/mod.rs
@@ -42,19 +42,19 @@ pub trait Printer<'tcx>: Sized {
fn print_def_path(
self,
def_id: DefId,
- substs: &'tcx [GenericArg<'tcx>],
+ args: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
- self.default_print_def_path(def_id, substs)
+ self.default_print_def_path(def_id, args)
}
fn print_impl_path(
self,
impl_def_id: DefId,
- substs: &'tcx [GenericArg<'tcx>],
+ args: &'tcx [GenericArg<'tcx>],
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
- self.default_print_impl_path(impl_def_id, substs, self_ty, trait_ref)
+ self.default_print_impl_path(impl_def_id, args, self_ty, trait_ref)
}
fn print_region(self, region: ty::Region<'tcx>) -> Result<Self::Region, Self::Error>;
@@ -102,7 +102,7 @@ pub trait Printer<'tcx>: Sized {
fn default_print_def_path(
self,
def_id: DefId,
- substs: &'tcx [GenericArg<'tcx>],
+ args: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
let key = self.tcx().def_key(def_id);
debug!(?key);
@@ -117,25 +117,28 @@ pub trait Printer<'tcx>: Sized {
let generics = self.tcx().generics_of(def_id);
let self_ty = self.tcx().type_of(def_id);
let impl_trait_ref = self.tcx().impl_trait_ref(def_id);
- let (self_ty, impl_trait_ref) = if substs.len() >= generics.count() {
+ let (self_ty, impl_trait_ref) = if args.len() >= generics.count() {
(
- self_ty.subst(self.tcx(), substs),
- impl_trait_ref.map(|i| i.subst(self.tcx(), substs)),
+ self_ty.instantiate(self.tcx(), args),
+ impl_trait_ref.map(|i| i.instantiate(self.tcx(), args)),
)
} else {
- (self_ty.subst_identity(), impl_trait_ref.map(|i| i.subst_identity()))
+ (
+ self_ty.instantiate_identity(),
+ impl_trait_ref.map(|i| i.instantiate_identity()),
+ )
};
- self.print_impl_path(def_id, substs, self_ty, impl_trait_ref)
+ self.print_impl_path(def_id, args, self_ty, impl_trait_ref)
}
_ => {
let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id };
- let mut parent_substs = substs;
+ let mut parent_args = args;
let mut trait_qualify_parent = false;
- if !substs.is_empty() {
+ if !args.is_empty() {
let generics = self.tcx().generics_of(def_id);
- parent_substs = &substs[..generics.parent_count.min(substs.len())];
+ parent_args = &args[..generics.parent_count.min(args.len())];
match key.disambiguated_data.data {
// Closures' own generics are only captures, don't print them.
@@ -148,10 +151,10 @@ pub trait Printer<'tcx>: Sized {
// If we have any generic arguments to print, we do that
// on top of the same path, but without its own generics.
_ => {
- if !generics.params.is_empty() && substs.len() >= generics.count() {
- let args = generics.own_substs_no_defaults(self.tcx(), substs);
+ if !generics.params.is_empty() && args.len() >= generics.count() {
+ let args = generics.own_args_no_defaults(self.tcx(), args);
return self.path_generic_args(
- |cx| cx.print_def_path(def_id, parent_substs),
+ |cx| cx.print_def_path(def_id, parent_args),
args,
);
}
@@ -162,7 +165,7 @@ pub trait Printer<'tcx>: Sized {
// logic, instead of doing it when printing the child.
trait_qualify_parent = generics.has_self
&& generics.parent == Some(parent_def_id)
- && parent_substs.len() == generics.parent_count
+ && parent_args.len() == generics.parent_count
&& self.tcx().generics_of(parent_def_id).parent_count == 0;
}
@@ -172,11 +175,11 @@ pub trait Printer<'tcx>: Sized {
let trait_ref = ty::TraitRef::new(
cx.tcx(),
parent_def_id,
- parent_substs.iter().copied(),
+ parent_args.iter().copied(),
);
cx.path_qualified(trait_ref.self_ty(), Some(trait_ref))
} else {
- cx.print_def_path(parent_def_id, parent_substs)
+ cx.print_def_path(parent_def_id, parent_args)
}
},
&key.disambiguated_data,
@@ -188,7 +191,7 @@ pub trait Printer<'tcx>: Sized {
fn default_print_impl_path(
self,
impl_def_id: DefId,
- _substs: &'tcx [GenericArg<'tcx>],
+ _args: &'tcx [GenericArg<'tcx>],
self_ty: Ty<'tcx>,
impl_trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
@@ -326,7 +329,8 @@ impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Const<'tcx> {
}
// This is only used by query descriptions
-pub fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
+pub fn describe_as_module(def_id: impl Into<LocalDefId>, tcx: TyCtxt<'_>) -> String {
+ let def_id = def_id.into();
if def_id.is_top_level_module() {
"top-level module".to_string()
} else {
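
`describe_as_module` now accepts anything convertible into a `LocalDefId`, in line with the typed-def-id work referenced by the FIXMEs in pretty.rs below. A small sketch, assuming a plain `LocalDefId` (the crate root) as input; the wrapper function is hypothetical:

    use rustc_hir::def_id::CRATE_DEF_ID;
    use rustc_middle::ty::print::describe_as_module;
    use rustc_middle::ty::TyCtxt;

    fn crate_description(tcx: TyCtxt<'_>) -> String {
        // `LocalDefId` converts into itself, so existing callers keep working;
        // the `impl Into` bound leaves room for typed module ids later.
        describe_as_module(CRATE_DEF_ID, tcx)
    }
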
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 96cf36eb9..ac0c88468 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -11,12 +11,13 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::sso::SsoHashSet;
use rustc_hir as hir;
use rustc_hir::def::{self, CtorKind, DefKind, Namespace};
-use rustc_hir::def_id::{DefId, DefIdSet, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::def_id::{DefId, DefIdSet, ModDefId, CRATE_DEF_ID, LOCAL_CRATE};
use rustc_hir::definitions::{DefKey, DefPathData, DefPathDataName, DisambiguatedDefPathData};
use rustc_hir::LangItem;
use rustc_session::config::TrimmedDefPaths;
use rustc_session::cstore::{ExternCrate, ExternCrateSource};
use rustc_session::Limit;
+use rustc_span::sym;
use rustc_span::symbol::{kw, Ident, Symbol};
use rustc_span::FileNameDisplayPreference;
use rustc_target::abi::Size;
@@ -224,9 +225,9 @@ pub trait PrettyPrinter<'tcx>:
fn print_value_path(
self,
def_id: DefId,
- substs: &'tcx [GenericArg<'tcx>],
+ args: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
- self.print_def_path(def_id, substs)
+ self.print_def_path(def_id, args)
}
fn in_binder<T>(self, value: &ty::Binder<'tcx, T>) -> Result<Self, Self::Error>
@@ -325,7 +326,8 @@ pub trait PrettyPrinter<'tcx>:
{
this
.tcx()
- .module_children(visible_parent)
+ // FIXME(typed_def_id): Further propagate ModDefId
+ .module_children(ModDefId::new_unchecked(*visible_parent))
.iter()
.filter(|child| child.res.opt_def_id() == Some(def_id))
.find(|child| child.vis.is_public() && child.ident.name != kw::Underscore)
@@ -363,7 +365,7 @@ pub trait PrettyPrinter<'tcx>:
self.write_str(get_local_name(&self, symbol, parent, parent_key).as_str())?;
self.write_str("::")?;
} else if let DefKind::Struct | DefKind::Union | DefKind::Enum | DefKind::Trait
- | DefKind::TyAlias | DefKind::Fn | DefKind::Const | DefKind::Static(_) = kind
+ | DefKind::TyAlias { .. } | DefKind::Fn | DefKind::Const | DefKind::Static(_) = kind
{
} else {
// If not covered above, like for example items out of `impl` blocks, fallback.
@@ -550,7 +552,8 @@ pub trait PrettyPrinter<'tcx>:
// that's public and whose identifier isn't `_`.
let reexport = self
.tcx()
- .module_children(visible_parent)
+ // FIXME(typed_def_id): Further propagate ModDefId
+ .module_children(ModDefId::new_unchecked(visible_parent))
.iter()
.filter(|child| child.res.opt_def_id() == Some(def_id))
.find(|child| child.vis.is_public() && child.ident.name != kw::Underscore)
@@ -679,12 +682,12 @@ pub trait PrettyPrinter<'tcx>:
}
p!(")")
}
- ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, args) => {
if with_no_queries() {
- p!(print_def_path(def_id, substs));
+ p!(print_def_path(def_id, args));
} else {
- let sig = self.tcx().fn_sig(def_id).subst(self.tcx(), substs);
- p!(print(sig), " {{", print_value_path(def_id, substs), "}}");
+ let sig = self.tcx().fn_sig(def_id).instantiate(self.tcx(), args);
+ p!(print(sig), " {{", print_value_path(def_id, args), "}}");
}
}
ty::FnPtr(ref bare_fn) => p!(print(bare_fn)),
@@ -715,8 +718,8 @@ pub trait PrettyPrinter<'tcx>:
false => p!(write("{s}")),
},
},
- ty::Adt(def, substs) => {
- p!(print_def_path(def.did(), substs));
+ ty::Adt(def, args) => {
+ p!(print_def_path(def.did(), args));
}
ty::Dynamic(data, r, repr) => {
let print_r = self.should_print_region(r);
@@ -739,7 +742,7 @@ pub trait PrettyPrinter<'tcx>:
if !(self.should_print_verbose() || with_no_queries())
&& self.tcx().is_impl_trait_in_trait(data.def_id)
{
- return self.pretty_print_opaque_impl_type(data.def_id, data.substs);
+ return self.pretty_print_opaque_impl_type(data.def_id, data.args);
} else {
p!(print(data))
}
@@ -751,7 +754,7 @@ pub trait PrettyPrinter<'tcx>:
false => p!(write("{name}")),
},
},
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
// We use verbose printing in 'NO_QUERIES' mode, to
// avoid needing to call `predicates_of`. This should
// only affect certain debug messages (e.g. messages printed
@@ -759,27 +762,27 @@ pub trait PrettyPrinter<'tcx>:
// and should have no effect on any compiler output.
if self.should_print_verbose() {
// FIXME(eddyb) print this with `print_def_path`.
- p!(write("Opaque({:?}, {:?})", def_id, substs));
+ p!(write("Opaque({:?}, {:?})", def_id, args));
return Ok(self);
}
let parent = self.tcx().parent(def_id);
match self.tcx().def_kind(parent) {
- DefKind::TyAlias | DefKind::AssocTy => {
+ DefKind::TyAlias { .. } | DefKind::AssocTy => {
// NOTE: I know we should check for NO_QUERIES here, but it's alright.
// `type_of` on a type alias or assoc type should never cause a cycle.
if let ty::Alias(ty::Opaque, ty::AliasTy { def_id: d, .. }) =
- *self.tcx().type_of(parent).subst_identity().kind()
+ *self.tcx().type_of(parent).instantiate_identity().kind()
{
if d == def_id {
// If the type alias directly starts with the `impl` of the
// opaque type we're printing, then skip the `::{opaque#1}`.
- p!(print_def_path(parent, substs));
+ p!(print_def_path(parent, args));
return Ok(self);
}
}
// Complex opaque type, e.g. `type Foo = (i32, impl Debug);`
- p!(print_def_path(def_id, substs));
+ p!(print_def_path(def_id, args));
return Ok(self);
}
_ => {
@@ -787,13 +790,13 @@ pub trait PrettyPrinter<'tcx>:
p!(print_def_path(def_id, &[]));
return Ok(self);
} else {
- return self.pretty_print_opaque_impl_type(def_id, substs);
+ return self.pretty_print_opaque_impl_type(def_id, args);
}
}
}
}
ty::Str => p!("str"),
- ty::Generator(did, substs, movability) => {
+ ty::Generator(did, args, movability) => {
p!(write("["));
let generator_kind = self.tcx().generator_kind(did).unwrap();
let should_print_movability =
@@ -818,20 +821,20 @@ pub trait PrettyPrinter<'tcx>:
self.tcx().sess.source_map().span_to_embeddable_string(span)
));
} else {
- p!(write("@"), print_def_path(did, substs));
+ p!(write("@"), print_def_path(did, args));
}
} else {
- p!(print_def_path(did, substs));
+ p!(print_def_path(did, args));
p!(" upvar_tys=(");
- if !substs.as_generator().is_valid() {
+ if !args.as_generator().is_valid() {
p!("unavailable");
} else {
- self = self.comma_sep(substs.as_generator().upvar_tys())?;
+ self = self.comma_sep(args.as_generator().upvar_tys().iter())?;
}
p!(")");
- if substs.as_generator().is_valid() {
- p!(" ", print(substs.as_generator().witness()));
+ if args.as_generator().is_valid() {
+ p!(" ", print(args.as_generator().witness()));
}
}
@@ -840,7 +843,7 @@ pub trait PrettyPrinter<'tcx>:
ty::GeneratorWitness(types) => {
p!(in_binder(&types));
}
- ty::GeneratorWitnessMIR(did, substs) => {
+ ty::GeneratorWitnessMIR(did, args) => {
p!(write("["));
if !self.tcx().sess.verbose() {
p!("generator witness");
@@ -854,22 +857,22 @@ pub trait PrettyPrinter<'tcx>:
self.tcx().sess.source_map().span_to_embeddable_string(span)
));
} else {
- p!(write("@"), print_def_path(did, substs));
+ p!(write("@"), print_def_path(did, args));
}
} else {
- p!(print_def_path(did, substs));
+ p!(print_def_path(did, args));
}
p!("]")
}
- ty::Closure(did, substs) => {
+ ty::Closure(did, args) => {
p!(write("["));
if !self.should_print_verbose() {
p!(write("closure"));
// FIXME(eddyb) should use `def_span`.
if let Some(did) = did.as_local() {
if self.tcx().sess.opts.unstable_opts.span_free_formats {
- p!("@", print_def_path(did.to_def_id(), substs));
+ p!("@", print_def_path(did.to_def_id(), args));
} else {
let span = self.tcx().def_span(did);
let preference = if FORCE_TRIMMED_PATH.with(|flag| flag.get()) {
@@ -885,21 +888,21 @@ pub trait PrettyPrinter<'tcx>:
));
}
} else {
- p!(write("@"), print_def_path(did, substs));
+ p!(write("@"), print_def_path(did, args));
}
} else {
- p!(print_def_path(did, substs));
- if !substs.as_closure().is_valid() {
- p!(" closure_substs=(unavailable)");
- p!(write(" substs={:?}", substs));
+ p!(print_def_path(did, args));
+ if !args.as_closure().is_valid() {
+ p!(" closure_args=(unavailable)");
+ p!(write(" args={:?}", args));
} else {
- p!(" closure_kind_ty=", print(substs.as_closure().kind_ty()));
+ p!(" closure_kind_ty=", print(args.as_closure().kind_ty()));
p!(
" closure_sig_as_fn_ptr_ty=",
- print(substs.as_closure().sig_as_fn_ptr_ty())
+ print(args.as_closure().sig_as_fn_ptr_ty())
);
p!(" upvar_tys=(");
- self = self.comma_sep(substs.as_closure().upvar_tys())?;
+ self = self.comma_sep(args.as_closure().upvar_tys().iter())?;
p!(")");
}
}
@@ -915,7 +918,7 @@ pub trait PrettyPrinter<'tcx>:
fn pretty_print_opaque_impl_type(
mut self,
def_id: DefId,
- substs: &'tcx ty::List<ty::GenericArg<'tcx>>,
+ args: &'tcx ty::List<ty::GenericArg<'tcx>>,
) -> Result<Self::Type, Self::Error> {
let tcx = self.tcx();
@@ -928,7 +931,7 @@ pub trait PrettyPrinter<'tcx>:
let mut is_sized = false;
let mut lifetimes = SmallVec::<[ty::Region<'tcx>; 1]>::new();
- for (predicate, _) in bounds.subst_iter_copied(tcx, substs) {
+ for (predicate, _) in bounds.iter_instantiated_copied(tcx, args) {
let bound_predicate = predicate.kind();
match bound_predicate.skip_binder() {
@@ -978,9 +981,9 @@ pub trait PrettyPrinter<'tcx>:
define_scoped_cx!(cx);
// Get the (single) generic ty (the args) of this FnOnce trait ref.
let generics = tcx.generics_of(trait_ref.def_id);
- let args = generics.own_substs_no_defaults(tcx, trait_ref.substs);
+ let own_args = generics.own_args_no_defaults(tcx, trait_ref.args);
- match (entry.return_ty, args[0].expect_ty()) {
+ match (entry.return_ty, own_args[0].expect_ty()) {
// We can only print `impl Fn() -> ()` if we have a tuple of args and we recorded
// a return type.
(Some(return_ty), arg_tys) if matches!(arg_tys.kind(), ty::Tuple(_)) => {
@@ -1044,12 +1047,12 @@ pub trait PrettyPrinter<'tcx>:
p!(print(trait_ref.print_only_trait_name()));
let generics = tcx.generics_of(trait_ref.def_id);
- let args = generics.own_substs_no_defaults(tcx, trait_ref.substs);
+ let own_args = generics.own_args_no_defaults(tcx, trait_ref.args);
- if !args.is_empty() || !assoc_items.is_empty() {
+ if !own_args.is_empty() || !assoc_items.is_empty() {
let mut first = true;
- for ty in args {
+ for ty in own_args {
if first {
p!("<");
first = false;
@@ -1068,8 +1071,8 @@ pub trait PrettyPrinter<'tcx>:
&& assoc.trait_container(tcx) == tcx.lang_items().gen_trait()
&& assoc.name == rustc_span::sym::Return
{
- if let ty::Generator(_, substs, _) = substs.type_at(0).kind() {
- let return_ty = substs.as_generator().return_ty();
+ if let ty::Generator(_, args, _) = args.type_at(0).kind() {
+ let return_ty = args.as_generator().return_ty();
if !return_ty.is_ty_var() {
return_ty.into()
} else {
@@ -1182,7 +1185,7 @@ pub trait PrettyPrinter<'tcx>:
&def_key.disambiguated_data,
)
},
- &alias_ty.substs[1..],
+ &alias_ty.args[1..],
)
}
@@ -1211,7 +1214,7 @@ pub trait PrettyPrinter<'tcx>:
// Special-case `Fn(...) -> ...` and re-sugar it.
let fn_trait_kind = cx.tcx().fn_trait_kind_from_def_id(principal.def_id);
if !cx.should_print_verbose() && fn_trait_kind.is_some() {
- if let ty::Tuple(tys) = principal.substs.type_at(0).kind() {
+ if let ty::Tuple(tys) = principal.args.type_at(0).kind() {
let mut projections = predicates.projection_bounds();
if let (Some(proj), None) = (projections.next(), projections.next()) {
p!(pretty_fn_sig(
@@ -1234,7 +1237,7 @@ pub trait PrettyPrinter<'tcx>:
let args = cx
.tcx()
.generics_of(principal.def_id)
- .own_substs_no_defaults(cx.tcx(), principal.substs);
+ .own_args_no_defaults(cx.tcx(), principal.args);
let mut projections = predicates.projection_bounds();
@@ -1341,10 +1344,10 @@ pub trait PrettyPrinter<'tcx>:
}
match ct.kind() {
- ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs }) => {
+ ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, args }) => {
match self.tcx().def_kind(def) {
DefKind::Const | DefKind::AssocConst => {
- p!(print_value_path(def, substs))
+ p!(print_value_path(def, args))
}
DefKind::AnonConst => {
if def.is_local()
@@ -1449,7 +1452,7 @@ pub trait PrettyPrinter<'tcx>:
self.tcx().try_get_global_alloc(alloc_id)
{
self = self.typed_value(
- |this| this.print_value_path(instance.def_id(), instance.substs),
+ |this| this.print_value_path(instance.def_id(), instance.args),
|this| this.print_type(ty),
" as ",
)?;
@@ -1497,7 +1500,7 @@ pub trait PrettyPrinter<'tcx>:
let data = int.assert_bits(self.tcx().data_layout.pointer_size);
self = self.typed_value(
|mut this| {
- write!(this, "0x{:x}", data)?;
+ write!(this, "0x{data:x}")?;
Ok(this)
},
|this| this.print_type(ty),
@@ -1510,7 +1513,7 @@ pub trait PrettyPrinter<'tcx>:
if int.size() == Size::ZERO {
write!(this, "transmute(())")?;
} else {
- write!(this, "transmute(0x{:x})", int)?;
+ write!(this, "transmute(0x{int:x})")?;
}
Ok(this)
};
@@ -1619,11 +1622,11 @@ pub trait PrettyPrinter<'tcx>:
": ",
)?;
}
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
let variant_idx =
contents.variant.expect("destructed const of adt without variant idx");
let variant_def = &def.variant(variant_idx);
- p!(print_value_path(variant_def.def_id, substs));
+ p!(print_value_path(variant_def.def_id, args));
match variant_def.ctor_kind() {
Some(CtorKind::Const) => {}
Some(CtorKind::Fn) => {
@@ -1673,7 +1676,7 @@ pub trait PrettyPrinter<'tcx>:
fn pretty_closure_as_impl(
mut self,
- closure: ty::ClosureSubsts<'tcx>,
+ closure: ty::ClosureArgs<'tcx>,
) -> Result<Self::Const, Self::Error> {
let sig = closure.sig();
let kind = closure.kind_ty().to_opt_closure_kind().unwrap_or(ty::ClosureKind::Fn);
@@ -1798,29 +1801,29 @@ impl<'t> TyCtxt<'t> {
/// Returns a string identifying this `DefId`. This string is
/// suitable for user output.
pub fn def_path_str(self, def_id: impl IntoQueryParam<DefId>) -> String {
- self.def_path_str_with_substs(def_id, &[])
+ self.def_path_str_with_args(def_id, &[])
}
- pub fn def_path_str_with_substs(
+ pub fn def_path_str_with_args(
self,
def_id: impl IntoQueryParam<DefId>,
- substs: &'t [GenericArg<'t>],
+ args: &'t [GenericArg<'t>],
) -> String {
let def_id = def_id.into_query_param();
let ns = guess_def_namespace(self, def_id);
debug!("def_path_str: def_id={:?}, ns={:?}", def_id, ns);
- FmtPrinter::new(self, ns).print_def_path(def_id, substs).unwrap().into_buffer()
+ FmtPrinter::new(self, ns).print_def_path(def_id, args).unwrap().into_buffer()
}
- pub fn value_path_str_with_substs(
+ pub fn value_path_str_with_args(
self,
def_id: impl IntoQueryParam<DefId>,
- substs: &'t [GenericArg<'t>],
+ args: &'t [GenericArg<'t>],
) -> String {
let def_id = def_id.into_query_param();
let ns = guess_def_namespace(self, def_id);
debug!("value_path_str: def_id={:?}, ns={:?}", def_id, ns);
- FmtPrinter::new(self, ns).print_value_path(def_id, substs).unwrap().into_buffer()
+ FmtPrinter::new(self, ns).print_value_path(def_id, args).unwrap().into_buffer()
}
}
@@ -1847,11 +1850,11 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> {
fn print_def_path(
mut self,
def_id: DefId,
- substs: &'tcx [GenericArg<'tcx>],
+ args: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
define_scoped_cx!(self);
- if substs.is_empty() {
+ if args.is_empty() {
match self.try_print_trimmed_def_path(def_id)? {
(cx, true) => return Ok(cx),
(cx, false) => self = cx,
@@ -1900,7 +1903,7 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> {
}
}
- self.default_print_def_path(def_id, substs)
+ self.default_print_def_path(def_id, args)
}
fn print_region(self, region: ty::Region<'tcx>) -> Result<Self::Region, Self::Error> {
@@ -1932,7 +1935,7 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> {
fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
self.empty_path = true;
if cnum == LOCAL_CRATE {
- if self.tcx.sess.rust_2018() {
+ if self.tcx.sess.at_least_rust_2018() {
// We add the `crate::` keyword on Rust 2018, only when desired.
if SHOULD_PREFIX_WITH_CRATE.with(|flag| flag.get()) {
write!(self, "{}", kw::Crate)?;
@@ -2017,11 +2020,37 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> {
) -> Result<Self::Path, Self::Error> {
self = print_prefix(self)?;
- if args.first().is_some() {
+ let tcx = self.tcx;
+
+ let args = args.iter().copied();
+
+ let args: Vec<_> = if !tcx.sess.verbose() {
+ // skip the host param, as it is printed as `~const`
+ args.filter(|arg| match arg.unpack() {
+ // FIXME(effects) there should be a better way than just matching the name
+ GenericArgKind::Const(c)
+ if tcx.features().effects
+ && matches!(
+ c.kind(),
+ ty::ConstKind::Param(ty::ParamConst { name: sym::host, .. })
+ ) =>
+ {
+ false
+ }
+ _ => true,
+ })
+ .collect()
+ } else {
+ // If -Zverbose is passed, we should print the host parameter instead
+ // of eating it.
+ args.collect()
+ };
+
+ if !args.is_empty() {
if self.in_value {
write!(self, "::")?;
}
- self.generic_delimiters(|cx| cx.comma_sep(args.iter().cloned()))
+ self.generic_delimiters(|cx| cx.comma_sep(args.into_iter()))
} else {
Ok(self)
}
@@ -2044,10 +2073,10 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> {
fn print_value_path(
mut self,
def_id: DefId,
- substs: &'tcx [GenericArg<'tcx>],
+ args: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
let was_in_value = std::mem::replace(&mut self.in_value, true);
- self = self.print_def_path(def_id, substs)?;
+ self = self.print_def_path(def_id, args)?;
self.in_value = was_in_value;
Ok(self)
@@ -2348,10 +2377,10 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
} else {
cont
};
- let _ = write!(cx, "{}", w);
+ let _ = write!(cx, "{w}");
};
let do_continue = |cx: &mut Self, cont: Symbol| {
- let _ = write!(cx, "{}", cont);
+ let _ = write!(cx, "{cont}");
};
define_scoped_cx!(self);
@@ -2387,7 +2416,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
let (new_value, map) = if self.should_print_verbose() {
for var in value.bound_vars().iter() {
start_or_continue(&mut self, "for<", ", ");
- write!(self, "{:?}", var)?;
+ write!(self, "{var:?}")?;
}
start_or_continue(&mut self, "", "> ");
(value.clone().skip_binder(), BTreeMap::default())
@@ -2695,7 +2724,7 @@ impl<'tcx> ty::PolyTraitPredicate<'tcx> {
#[derive(Debug, Copy, Clone, Lift)]
pub struct PrintClosureAsImpl<'tcx> {
- pub closure: ty::ClosureSubsts<'tcx>,
+ pub closure: ty::ClosureArgs<'tcx>,
}
forward_display_to_print! {
@@ -2707,8 +2736,9 @@ forward_display_to_print! {
// HACK(eddyb) these are exhaustive instead of generic,
// because `for<'tcx>` isn't possible yet.
ty::PolyExistentialPredicate<'tcx>,
+ ty::PolyExistentialProjection<'tcx>,
+ ty::PolyExistentialTraitRef<'tcx>,
ty::Binder<'tcx, ty::TraitRef<'tcx>>,
- ty::Binder<'tcx, ty::ExistentialTraitRef<'tcx>>,
ty::Binder<'tcx, TraitRefPrintOnlyTraitPath<'tcx>>,
ty::Binder<'tcx, TraitRefPrintOnlyTraitName<'tcx>>,
ty::Binder<'tcx, ty::FnSig<'tcx>>,
@@ -2771,7 +2801,7 @@ define_print_and_forward_display! {
}
TraitRefPrintOnlyTraitPath<'tcx> {
- p!(print_def_path(self.0.def_id, self.0.substs));
+ p!(print_def_path(self.0.def_id, self.0.args));
}
TraitRefPrintOnlyTraitName<'tcx> {
@@ -2779,10 +2809,7 @@ define_print_and_forward_display! {
}
TraitPredPrintModifiersAndPath<'tcx> {
- if let ty::BoundConstness::ConstIfConst = self.0.constness {
- p!("~const ")
- }
-
+ // FIXME(effects) print `~const` here
if let ty::ImplPolarity::Negative = self.0.polarity {
p!("!")
}
@@ -2816,9 +2843,12 @@ define_print_and_forward_display! {
ty::TraitPredicate<'tcx> {
p!(print(self.trait_ref.self_ty()), ": ");
- if let ty::BoundConstness::ConstIfConst = self.constness && cx.tcx().features().const_trait_impl {
- p!("~const ");
+ if let Some(idx) = cx.tcx().generics_of(self.trait_ref.def_id).host_effect_index {
+ if self.trait_ref.args.const_at(idx) != cx.tcx().consts.true_ {
+ p!("~const ");
+ }
}
+ // FIXME(effects) print `~const` here
if let ty::ImplPolarity::Negative = self.polarity {
p!("!");
}
@@ -2842,16 +2872,12 @@ define_print_and_forward_display! {
if let DefKind::Impl { of_trait: false } = cx.tcx().def_kind(cx.tcx().parent(self.def_id)) {
p!(pretty_print_inherent_projection(self))
} else {
- p!(print_def_path(self.def_id, self.substs));
+ p!(print_def_path(self.def_id, self.args));
}
}
ty::ClosureKind {
- match *self {
- ty::ClosureKind::Fn => p!("Fn"),
- ty::ClosureKind::FnMut => p!("FnMut"),
- ty::ClosureKind::FnOnce => p!("FnOnce"),
- }
+ p!(write("{}", self.as_str()))
}
ty::Predicate<'tcx> {
@@ -2891,7 +2917,7 @@ define_print_and_forward_display! {
ty::PredicateKind::ObjectSafe(trait_def_id) => {
p!("the trait `", print_def_path(trait_def_id, &[]), "` is object-safe")
}
- ty::PredicateKind::ClosureKind(closure_def_id, _closure_substs, kind) => p!(
+ ty::PredicateKind::ClosureKind(closure_def_id, _closure_args, kind) => p!(
"the closure `",
print_value_path(closure_def_id, &[]),
write("` implements the trait `{}`", kind)
@@ -2960,7 +2986,7 @@ fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, N
match child.res {
def::Res::Def(DefKind::AssocTy, _) => {}
- def::Res::Def(DefKind::TyAlias, _) => {}
+ def::Res::Def(DefKind::TyAlias { .. }, _) => {}
def::Res::Def(defkind, def_id) => {
if let Some(ns) = defkind.ns() {
collect_fn(&child.ident, ns, def_id);
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
index 5741832c9..47512d350 100644
--- a/compiler/rustc_middle/src/ty/relate.rs
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -6,7 +6,7 @@
use crate::ty::error::{ExpectedFound, TypeError};
use crate::ty::{self, Expr, ImplSubject, Term, TermKind, Ty, TyCtxt, TypeFoldable};
-use crate::ty::{GenericArg, GenericArgKind, SubstsRef};
+use crate::ty::{GenericArg, GenericArgKind, GenericArgsRef};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_target::spec::abi;
@@ -43,23 +43,23 @@ pub trait TypeRelation<'tcx>: Sized {
Relate::relate(self, a, b)
}
- /// Relate the two substitutions for the given item. The default
+ /// Relate the two args for the given item. The default
/// is to look up the variance for the item and proceed
/// accordingly.
- fn relate_item_substs(
+ fn relate_item_args(
&mut self,
item_def_id: DefId,
- a_subst: SubstsRef<'tcx>,
- b_subst: SubstsRef<'tcx>,
- ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ a_arg: GenericArgsRef<'tcx>,
+ b_arg: GenericArgsRef<'tcx>,
+ ) -> RelateResult<'tcx, GenericArgsRef<'tcx>> {
debug!(
- "relate_item_substs(item_def_id={:?}, a_subst={:?}, b_subst={:?})",
- item_def_id, a_subst, b_subst
+ "relate_item_args(item_def_id={:?}, a_arg={:?}, b_arg={:?})",
+ item_def_id, a_arg, b_arg
);
let tcx = self.tcx();
let opt_variances = tcx.variances_of(item_def_id);
- relate_substs_with_variances(self, item_def_id, opt_variances, a_subst, b_subst, true)
+ relate_args_with_variances(self, item_def_id, opt_variances, a_arg, b_arg, true)
}
/// Switch variance for the purpose of relating `a` and `b`.
@@ -134,31 +134,32 @@ pub fn relate_type_and_mut<'tcx, R: TypeRelation<'tcx>>(
}
#[inline]
-pub fn relate_substs<'tcx, R: TypeRelation<'tcx>>(
+pub fn relate_args<'tcx, R: TypeRelation<'tcx>>(
relation: &mut R,
- a_subst: SubstsRef<'tcx>,
- b_subst: SubstsRef<'tcx>,
-) -> RelateResult<'tcx, SubstsRef<'tcx>> {
- relation.tcx().mk_substs_from_iter(iter::zip(a_subst, b_subst).map(|(a, b)| {
+ a_arg: GenericArgsRef<'tcx>,
+ b_arg: GenericArgsRef<'tcx>,
+) -> RelateResult<'tcx, GenericArgsRef<'tcx>> {
+ relation.tcx().mk_args_from_iter(iter::zip(a_arg, b_arg).map(|(a, b)| {
relation.relate_with_variance(ty::Invariant, ty::VarianceDiagInfo::default(), a, b)
}))
}
-pub fn relate_substs_with_variances<'tcx, R: TypeRelation<'tcx>>(
+pub fn relate_args_with_variances<'tcx, R: TypeRelation<'tcx>>(
relation: &mut R,
ty_def_id: DefId,
variances: &[ty::Variance],
- a_subst: SubstsRef<'tcx>,
- b_subst: SubstsRef<'tcx>,
+ a_arg: GenericArgsRef<'tcx>,
+ b_arg: GenericArgsRef<'tcx>,
fetch_ty_for_diag: bool,
-) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+) -> RelateResult<'tcx, GenericArgsRef<'tcx>> {
let tcx = relation.tcx();
let mut cached_ty = None;
- let params = iter::zip(a_subst, b_subst).enumerate().map(|(i, (a, b))| {
+ let params = iter::zip(a_arg, b_arg).enumerate().map(|(i, (a, b))| {
let variance = variances[i];
let variance_info = if variance == ty::Invariant && fetch_ty_for_diag {
- let ty = *cached_ty.get_or_insert_with(|| tcx.type_of(ty_def_id).subst(tcx, a_subst));
+ let ty =
+ *cached_ty.get_or_insert_with(|| tcx.type_of(ty_def_id).instantiate(tcx, a_arg));
ty::VarianceDiagInfo::Invariant { ty, param_index: i.try_into().unwrap() }
} else {
ty::VarianceDiagInfo::default()
@@ -166,7 +167,7 @@ pub fn relate_substs_with_variances<'tcx, R: TypeRelation<'tcx>>(
relation.relate_with_variance(variance, variance_info, a, b)
});
- tcx.mk_substs_from_iter(params)
+ tcx.mk_args_from_iter(params)
}
impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> {
@@ -272,8 +273,8 @@ impl<'tcx> Relate<'tcx> for ty::AliasTy<'tcx> {
if a.def_id != b.def_id {
Err(TypeError::ProjectionMismatched(expected_found(relation, a.def_id, b.def_id)))
} else {
- let substs = relation.relate(a.substs, b.substs)?;
- Ok(relation.tcx().mk_alias_ty(a.def_id, substs))
+ let args = relation.relate(a.args, b.args)?;
+ Ok(relation.tcx().mk_alias_ty(a.def_id, args))
}
}
}
@@ -293,13 +294,13 @@ impl<'tcx> Relate<'tcx> for ty::ExistentialProjection<'tcx> {
a.term,
b.term,
)?;
- let substs = relation.relate_with_variance(
+ let args = relation.relate_with_variance(
ty::Invariant,
ty::VarianceDiagInfo::default(),
- a.substs,
- b.substs,
+ a.args,
+ b.args,
)?;
- Ok(ty::ExistentialProjection { def_id: a.def_id, substs, term })
+ Ok(ty::ExistentialProjection { def_id: a.def_id, args, term })
}
}
}
@@ -314,8 +315,8 @@ impl<'tcx> Relate<'tcx> for ty::TraitRef<'tcx> {
if a.def_id != b.def_id {
Err(TypeError::Traits(expected_found(relation, a.def_id, b.def_id)))
} else {
- let substs = relate_substs(relation, a.substs, b.substs)?;
- Ok(ty::TraitRef::new(relation.tcx(), a.def_id, substs))
+ let args = relate_args(relation, a.args, b.args)?;
+ Ok(ty::TraitRef::new(relation.tcx(), a.def_id, args))
}
}
}
@@ -330,8 +331,8 @@ impl<'tcx> Relate<'tcx> for ty::ExistentialTraitRef<'tcx> {
if a.def_id != b.def_id {
Err(TypeError::Traits(expected_found(relation, a.def_id, b.def_id)))
} else {
- let substs = relate_substs(relation, a.substs, b.substs)?;
- Ok(ty::ExistentialTraitRef { def_id: a.def_id, substs })
+ let args = relate_args(relation, a.args, b.args)?;
+ Ok(ty::ExistentialTraitRef { def_id: a.def_id, args })
}
}
}
@@ -426,9 +427,9 @@ pub fn structurally_relate_tys<'tcx, R: TypeRelation<'tcx>>(
(ty::Placeholder(p1), ty::Placeholder(p2)) if p1 == p2 => Ok(a),
- (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs)) if a_def == b_def => {
- let substs = relation.relate_item_substs(a_def.did(), a_substs, b_substs)?;
- Ok(Ty::new_adt(tcx, a_def, substs))
+ (&ty::Adt(a_def, a_args), &ty::Adt(b_def, b_args)) if a_def == b_def => {
+ let args = relation.relate_item_args(a_def.did(), a_args, b_args)?;
+ Ok(Ty::new_adt(tcx, a_def, args))
}
(&ty::Foreign(a_id), &ty::Foreign(b_id)) if a_id == b_id => Ok(Ty::new_foreign(tcx, a_id)),
@@ -442,14 +443,14 @@ pub fn structurally_relate_tys<'tcx, R: TypeRelation<'tcx>>(
Ok(Ty::new_dynamic(tcx, relation.relate(a_obj, b_obj)?, region_bound, a_repr))
}
- (&ty::Generator(a_id, a_substs, movability), &ty::Generator(b_id, b_substs, _))
+ (&ty::Generator(a_id, a_args, movability), &ty::Generator(b_id, b_args, _))
if a_id == b_id =>
{
// All Generator types with the same id represent
// the (anonymous) type of the same generator expression. So
// all of their regions should be equated.
- let substs = relation.relate(a_substs, b_substs)?;
- Ok(Ty::new_generator(tcx, a_id, substs, movability))
+ let args = relation.relate(a_args, b_args)?;
+ Ok(Ty::new_generator(tcx, a_id, args, movability))
}
(&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) => {
@@ -462,22 +463,22 @@ pub fn structurally_relate_tys<'tcx, R: TypeRelation<'tcx>>(
Ok(Ty::new_generator_witness(tcx, types))
}
- (&ty::GeneratorWitnessMIR(a_id, a_substs), &ty::GeneratorWitnessMIR(b_id, b_substs))
+ (&ty::GeneratorWitnessMIR(a_id, a_args), &ty::GeneratorWitnessMIR(b_id, b_args))
if a_id == b_id =>
{
// All GeneratorWitness types with the same id represent
// the (anonymous) type of the same generator expression. So
// all of their regions should be equated.
- let substs = relation.relate(a_substs, b_substs)?;
- Ok(Ty::new_generator_witness_mir(tcx, a_id, substs))
+ let args = relation.relate(a_args, b_args)?;
+ Ok(Ty::new_generator_witness_mir(tcx, a_id, args))
}
- (&ty::Closure(a_id, a_substs), &ty::Closure(b_id, b_substs)) if a_id == b_id => {
+ (&ty::Closure(a_id, a_args), &ty::Closure(b_id, b_args)) if a_id == b_id => {
// All Closure types with the same id represent
// the (anonymous) type of the same closure expression. So
// all of their regions should be equated.
- let substs = relation.relate(a_substs, b_substs)?;
- Ok(Ty::new_closure(tcx, a_id, &substs))
+ let args = relation.relate(a_args, b_args)?;
+ Ok(Ty::new_closure(tcx, a_id, &args))
}
(&ty::RawPtr(a_mt), &ty::RawPtr(b_mt)) => {
@@ -535,11 +536,9 @@ pub fn structurally_relate_tys<'tcx, R: TypeRelation<'tcx>>(
}
}
- (&ty::FnDef(a_def_id, a_substs), &ty::FnDef(b_def_id, b_substs))
- if a_def_id == b_def_id =>
- {
- let substs = relation.relate_item_substs(a_def_id, a_substs, b_substs)?;
- Ok(Ty::new_fn_def(tcx, a_def_id, substs))
+ (&ty::FnDef(a_def_id, a_args), &ty::FnDef(b_def_id, b_args)) if a_def_id == b_def_id => {
+ let args = relation.relate_item_args(a_def_id, a_args, b_args)?;
+ Ok(Ty::new_fn_def(tcx, a_def_id, args))
}
(&ty::FnPtr(a_fty), &ty::FnPtr(b_fty)) => {
@@ -547,35 +546,29 @@ pub fn structurally_relate_tys<'tcx, R: TypeRelation<'tcx>>(
Ok(Ty::new_fn_ptr(tcx, fty))
}
- // The substs of opaque types may not all be invariant, so we have
+ // The args of opaque types may not all be invariant, so we have
// to treat them separately from other aliases.
(
- &ty::Alias(ty::Opaque, ty::AliasTy { def_id: a_def_id, substs: a_substs, .. }),
- &ty::Alias(ty::Opaque, ty::AliasTy { def_id: b_def_id, substs: b_substs, .. }),
+ &ty::Alias(ty::Opaque, ty::AliasTy { def_id: a_def_id, args: a_args, .. }),
+ &ty::Alias(ty::Opaque, ty::AliasTy { def_id: b_def_id, args: b_args, .. }),
) if a_def_id == b_def_id => {
let opt_variances = tcx.variances_of(a_def_id);
- let substs = relate_substs_with_variances(
+ let args = relate_args_with_variances(
relation,
a_def_id,
opt_variances,
- a_substs,
- b_substs,
+ a_args,
+ b_args,
false, // do not fetch `type_of(a_def_id)`, as it will cause a cycle
)?;
- Ok(Ty::new_opaque(tcx, a_def_id, substs))
+ Ok(Ty::new_opaque(tcx, a_def_id, args))
}
// Alias tend to mostly already be handled downstream due to normalization.
(&ty::Alias(a_kind, a_data), &ty::Alias(b_kind, b_data)) => {
- // FIXME(-Zlower-impl-trait-in-trait-to-assoc-ty): This if can be removed
- // and the assert uncommented once the new desugaring is stable.
- if a_kind == b_kind {
- let alias_ty = relation.relate(a_data, b_data)?;
- // assert_eq!(a_kind, b_kind);
- Ok(Ty::new_alias(tcx, a_kind, alias_ty))
- } else {
- Err(TypeError::Sorts(expected_found(relation, a, b)))
- }
+ let alias_ty = relation.relate(a_data, b_data)?;
+ assert_eq!(a_kind, b_kind);
+ Ok(Ty::new_alias(tcx, a_kind, alias_ty))
}
_ => Err(TypeError::Sorts(expected_found(relation, a, b))),
@@ -624,15 +617,15 @@ pub fn structurally_relate_consts<'tcx, R: TypeRelation<'tcx>>(
// be stabilized.
(ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu)) if au.def == bu.def => {
assert_eq!(a.ty(), b.ty());
- let substs = relation.relate_with_variance(
+ let args = relation.relate_with_variance(
ty::Variance::Invariant,
ty::VarianceDiagInfo::default(),
- au.substs,
- bu.substs,
+ au.args,
+ bu.args,
)?;
return Ok(ty::Const::new_unevaluated(
tcx,
- ty::UnevaluatedConst { def: au.def, substs },
+ ty::UnevaluatedConst { def: au.def, args },
a.ty(),
));
}
@@ -646,7 +639,7 @@ pub fn structurally_relate_consts<'tcx, R: TypeRelation<'tcx>>(
// FIXME(generic_const_exprs): relating the `ty()`s is a little weird since it is supposed to
// ICE If they mismatch. Unfortunately `ConstKind::Expr` is a little special and can be thought
// of as being generic over the argument types, however this is implicit so these types don't get
- // related when we relate the substs of the item this const arg is for.
+ // related when we relate the args of the item this const arg is for.
let expr = match (ae, be) {
(Expr::Binop(a_op, al, ar), Expr::Binop(b_op, bl, br)) if a_op == b_op => {
r.relate(al.ty(), bl.ty())?;
@@ -720,35 +713,35 @@ impl<'tcx> Relate<'tcx> for &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>> {
}
}
-impl<'tcx> Relate<'tcx> for ty::ClosureSubsts<'tcx> {
+impl<'tcx> Relate<'tcx> for ty::ClosureArgs<'tcx> {
fn relate<R: TypeRelation<'tcx>>(
relation: &mut R,
- a: ty::ClosureSubsts<'tcx>,
- b: ty::ClosureSubsts<'tcx>,
- ) -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>> {
- let substs = relate_substs(relation, a.substs, b.substs)?;
- Ok(ty::ClosureSubsts { substs })
+ a: ty::ClosureArgs<'tcx>,
+ b: ty::ClosureArgs<'tcx>,
+ ) -> RelateResult<'tcx, ty::ClosureArgs<'tcx>> {
+ let args = relate_args(relation, a.args, b.args)?;
+ Ok(ty::ClosureArgs { args })
}
}
-impl<'tcx> Relate<'tcx> for ty::GeneratorSubsts<'tcx> {
+impl<'tcx> Relate<'tcx> for ty::GeneratorArgs<'tcx> {
fn relate<R: TypeRelation<'tcx>>(
relation: &mut R,
- a: ty::GeneratorSubsts<'tcx>,
- b: ty::GeneratorSubsts<'tcx>,
- ) -> RelateResult<'tcx, ty::GeneratorSubsts<'tcx>> {
- let substs = relate_substs(relation, a.substs, b.substs)?;
- Ok(ty::GeneratorSubsts { substs })
+ a: ty::GeneratorArgs<'tcx>,
+ b: ty::GeneratorArgs<'tcx>,
+ ) -> RelateResult<'tcx, ty::GeneratorArgs<'tcx>> {
+ let args = relate_args(relation, a.args, b.args)?;
+ Ok(ty::GeneratorArgs { args })
}
}
-impl<'tcx> Relate<'tcx> for SubstsRef<'tcx> {
+impl<'tcx> Relate<'tcx> for GenericArgsRef<'tcx> {
fn relate<R: TypeRelation<'tcx>>(
relation: &mut R,
- a: SubstsRef<'tcx>,
- b: SubstsRef<'tcx>,
- ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
- relate_substs(relation, a, b)
+ a: GenericArgsRef<'tcx>,
+ b: GenericArgsRef<'tcx>,
+ ) -> RelateResult<'tcx, GenericArgsRef<'tcx>> {
+ relate_args(relation, a, b)
}
}
@@ -833,7 +826,6 @@ impl<'tcx> Relate<'tcx> for ty::TraitPredicate<'tcx> {
) -> RelateResult<'tcx, ty::TraitPredicate<'tcx>> {
Ok(ty::TraitPredicate {
trait_ref: relation.relate(a.trait_ref, b.trait_ref)?,
- constness: relation.relate(a.constness, b.constness)?,
polarity: relation.relate(a.polarity, b.polarity)?,
})
}
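The hunk above renames `relate_substs_with_variances` to `relate_args_with_variances` but keeps its core shape: the two argument lists are zipped and each pair is related under the variance recorded for that parameter position. The following standalone sketch illustrates that shape only; `Variance`, `Arg`, and `relate_pair` are made-up illustrative names, not rustc's API.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Variance {
    Covariant,
    Invariant,
}

#[derive(Clone, Copy, Debug, PartialEq)]
struct Arg(&'static str);

// Relate one pair of arguments under the variance of its parameter position.
fn relate_pair(variance: Variance, a: Arg, b: Arg) -> Result<Arg, String> {
    match variance {
        // Invariant positions require the two sides to be identical.
        Variance::Invariant if a == b => Ok(a),
        Variance::Invariant => Err(format!("{a:?} != {b:?} in invariant position")),
        // A real relation would check subtyping here; equality stands in for it.
        Variance::Covariant if a == b => Ok(a),
        Variance::Covariant => Err(format!("{a:?} does not relate to {b:?}")),
    }
}

// Zip the two argument lists and relate them position-by-position, as
// `relate_args_with_variances` does, stopping at the first failure.
fn relate_args(variances: &[Variance], a: &[Arg], b: &[Arg]) -> Result<Vec<Arg>, String> {
    std::iter::zip(a, b)
        .enumerate()
        .map(|(i, (&a, &b))| relate_pair(variances[i], a, b))
        .collect()
}

fn main() {
    let variances = [Variance::Covariant, Variance::Invariant];
    let a = [Arg("&'a str"), Arg("u32")];
    let b = [Arg("&'a str"), Arg("u32")];
    println!("{:?}", relate_args(&variances, &a, &b)); // Ok([Arg("&'a str"), Arg("u32")])
}
```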
diff --git a/compiler/rustc_middle/src/ty/rvalue_scopes.rs b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
index e79b79a25..17eabec25 100644
--- a/compiler/rustc_middle/src/ty/rvalue_scopes.rs
+++ b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
@@ -1,12 +1,12 @@
use crate::middle::region::{Scope, ScopeData, ScopeTree};
-use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
+use rustc_hir::ItemLocalMap;
/// `RvalueScopes` is a mapping from sub-expressions to _extended_ lifetime as determined by
/// rules laid out in `rustc_hir_analysis::check::rvalue_scopes`.
#[derive(TyEncodable, TyDecodable, Clone, Debug, Default, Eq, PartialEq, HashStable)]
pub struct RvalueScopes {
- map: FxHashMap<hir::ItemLocalId, Option<Scope>>,
+ map: ItemLocalMap<Option<Scope>>,
}
impl RvalueScopes {
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
index 7220d133f..f979ddd00 100644
--- a/compiler/rustc_middle/src/ty/structural_impls.rs
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -11,13 +11,15 @@ use crate::ty::{self, AliasTy, InferConst, Lift, Term, TermKind, Ty, TyCtxt};
use rustc_hir::def::Namespace;
use rustc_index::{Idx, IndexVec};
use rustc_target::abi::TyAndLayout;
-use rustc_type_ir::ConstKind;
+use rustc_type_ir::{ConstKind, DebugWithInfcx, InferCtxtLike, OptWithInfcx};
-use std::fmt;
+use std::fmt::{self, Debug};
use std::ops::ControlFlow;
use std::rc::Rc;
use std::sync::Arc;
+use super::{GenericArg, GenericArgKind, Region};
+
impl fmt::Debug for ty::TraitDef {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ty::tls::with(|tcx| {
@@ -71,9 +73,9 @@ impl fmt::Debug for ty::BoundRegionKind {
ty::BrAnon(span) => write!(f, "BrAnon({span:?})"),
ty::BrNamed(did, name) => {
if did.is_crate_root() {
- write!(f, "BrNamed({})", name)
+ write!(f, "BrNamed({name})")
} else {
- write!(f, "BrNamed({:?}, {})", did, name)
+ write!(f, "BrNamed({did:?}, {name})")
}
}
ty::BrEnv => write!(f, "BrEnv"),
@@ -89,7 +91,16 @@ impl fmt::Debug for ty::FreeRegion {
impl<'tcx> fmt::Debug for ty::FnSig<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let ty::FnSig { inputs_and_output: _, c_variadic, unsafety, abi } = self;
+ OptWithInfcx::new_no_ctx(self).fmt(f)
+ }
+}
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for ty::FnSig<'tcx> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ let sig = this.data;
+ let ty::FnSig { inputs_and_output: _, c_variadic, unsafety, abi } = sig;
write!(f, "{}", unsafety.prefix_str())?;
match abi {
@@ -98,15 +109,15 @@ impl<'tcx> fmt::Debug for ty::FnSig<'tcx> {
};
write!(f, "fn(")?;
- let inputs = self.inputs();
+ let inputs = sig.inputs();
match inputs.len() {
0 if *c_variadic => write!(f, "...)")?,
0 => write!(f, ")")?,
_ => {
- for ty in &self.inputs()[0..(self.inputs().len() - 1)] {
- write!(f, "{ty:?}, ")?;
+ for ty in &sig.inputs()[0..(sig.inputs().len() - 1)] {
+ write!(f, "{:?}, ", &this.wrap(ty))?;
}
- write!(f, "{:?}", self.inputs().last().unwrap())?;
+ write!(f, "{:?}", &this.wrap(sig.inputs().last().unwrap()))?;
if *c_variadic {
write!(f, "...")?;
}
@@ -114,9 +125,9 @@ impl<'tcx> fmt::Debug for ty::FnSig<'tcx> {
}
}
- match self.output().kind() {
+ match sig.output().kind() {
ty::Tuple(list) if list.is_empty() => Ok(()),
- _ => write!(f, " -> {:?}", self.output()),
+ _ => write!(f, " -> {:?}", &this.wrap(sig.output())),
}
}
}
@@ -133,6 +144,14 @@ impl<'tcx> fmt::Debug for ty::TraitRef<'tcx> {
}
}
+impl<'tcx> ty::DebugWithInfcx<TyCtxt<'tcx>> for Ty<'tcx> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ this.data.fmt(f)
+ }
+}
impl<'tcx> fmt::Debug for Ty<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
with_no_trimmed_paths!(fmt::Display::fmt(self, f))
@@ -153,9 +172,7 @@ impl fmt::Debug for ty::ParamConst {
impl<'tcx> fmt::Debug for ty::TraitPredicate<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- if let ty::BoundConstness::ConstIfConst = self.constness {
- write!(f, "~const ")?;
- }
+ // FIXME(effects) printing?
write!(f, "TraitPredicate({:?}, polarity:{:?})", self.trait_ref, self.polarity)
}
}
@@ -186,7 +203,7 @@ impl<'tcx> fmt::Debug for ty::ClauseKind<'tcx> {
ty::ClauseKind::RegionOutlives(ref pair) => pair.fmt(f),
ty::ClauseKind::TypeOutlives(ref pair) => pair.fmt(f),
ty::ClauseKind::Projection(ref pair) => pair.fmt(f),
- ty::ClauseKind::WellFormed(ref data) => write!(f, "WellFormed({:?})", data),
+ ty::ClauseKind::WellFormed(ref data) => write!(f, "WellFormed({data:?})"),
ty::ClauseKind::ConstEvaluatable(ct) => {
write!(f, "ConstEvaluatable({ct:?})")
}
@@ -201,12 +218,12 @@ impl<'tcx> fmt::Debug for ty::PredicateKind<'tcx> {
ty::PredicateKind::Subtype(ref pair) => pair.fmt(f),
ty::PredicateKind::Coerce(ref pair) => pair.fmt(f),
ty::PredicateKind::ObjectSafe(trait_def_id) => {
- write!(f, "ObjectSafe({:?})", trait_def_id)
+ write!(f, "ObjectSafe({trait_def_id:?})")
}
- ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
- write!(f, "ClosureKind({:?}, {:?}, {:?})", closure_def_id, closure_substs, kind)
+ ty::PredicateKind::ClosureKind(closure_def_id, closure_args, kind) => {
+ write!(f, "ClosureKind({closure_def_id:?}, {closure_args:?}, {kind:?})")
}
- ty::PredicateKind::ConstEquate(c1, c2) => write!(f, "ConstEquate({:?}, {:?})", c1, c2),
+ ty::PredicateKind::ConstEquate(c1, c2) => write!(f, "ConstEquate({c1:?}, {c2:?})"),
ty::PredicateKind::Ambiguous => write!(f, "Ambiguous"),
ty::PredicateKind::AliasRelate(t1, t2, dir) => {
write!(f, "AliasRelate({t1:?}, {dir:?}, {t2:?})")
@@ -217,9 +234,17 @@ impl<'tcx> fmt::Debug for ty::PredicateKind<'tcx> {
impl<'tcx> fmt::Debug for AliasTy<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ OptWithInfcx::new_no_ctx(self).fmt(f)
+ }
+}
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for AliasTy<'tcx> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
f.debug_struct("AliasTy")
- .field("substs", &self.substs)
- .field("def_id", &self.def_id)
+ .field("args", &this.map(|data| data.args))
+ .field("def_id", &this.data.def_id)
.finish()
}
}
@@ -232,13 +257,93 @@ impl<'tcx> fmt::Debug for ty::InferConst<'tcx> {
}
}
}
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for ty::InferConst<'tcx> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ use ty::InferConst::*;
+ match this.infcx.and_then(|infcx| infcx.universe_of_ct(*this.data)) {
+ None => write!(f, "{:?}", this.data),
+ Some(universe) => match *this.data {
+ Var(vid) => write!(f, "?{}_{}c", vid.index, universe.index()),
+ Fresh(_) => {
+ unreachable!()
+ }
+ },
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::consts::Expr<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ OptWithInfcx::new_no_ctx(self).fmt(f)
+ }
+}
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for ty::consts::Expr<'tcx> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ match this.data {
+ ty::Expr::Binop(op, lhs, rhs) => {
+ write!(f, "({op:?}: {:?}, {:?})", &this.wrap(lhs), &this.wrap(rhs))
+ }
+ ty::Expr::UnOp(op, rhs) => write!(f, "({op:?}: {:?})", &this.wrap(rhs)),
+ ty::Expr::FunctionCall(func, args) => {
+ write!(f, "{:?}(", &this.wrap(func))?;
+ for arg in args.as_slice().iter().rev().skip(1).rev() {
+ write!(f, "{:?}, ", &this.wrap(arg))?;
+ }
+ if let Some(arg) = args.last() {
+ write!(f, "{:?}", &this.wrap(arg))?;
+ }
+
+ write!(f, ")")
+ }
+ ty::Expr::Cast(cast_kind, lhs, rhs) => {
+ write!(f, "({cast_kind:?}: {:?}, {:?})", &this.wrap(lhs), &this.wrap(rhs))
+ }
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::UnevaluatedConst<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ OptWithInfcx::new_no_ctx(self).fmt(f)
+ }
+}
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for ty::UnevaluatedConst<'tcx> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ f.debug_struct("UnevaluatedConst")
+ .field("def", &this.data.def)
+ .field("args", &this.wrap(this.data.args))
+ .finish()
+ }
+}
impl<'tcx> fmt::Debug for ty::Const<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ OptWithInfcx::new_no_ctx(self).fmt(f)
+ }
+}
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for ty::Const<'tcx> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
// This reflects what `Const` looked liked before `Interned` was
// introduced. We print it like this to avoid having to update expected
// output in a lot of tests.
- write!(f, "Const {{ ty: {:?}, kind: {:?} }}", self.ty(), self.kind())
+ write!(
+ f,
+ "Const {{ ty: {:?}, kind: {:?} }}",
+ &this.map(|data| data.ty()),
+ &this.map(|data| data.kind())
+ )
}
}
@@ -261,6 +366,66 @@ impl<T: fmt::Debug> fmt::Debug for ty::Placeholder<T> {
}
}
+impl<'tcx> fmt::Debug for GenericArg<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => lt.fmt(f),
+ GenericArgKind::Type(ty) => ty.fmt(f),
+ GenericArgKind::Const(ct) => ct.fmt(f),
+ }
+ }
+}
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for GenericArg<'tcx> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ match this.data.unpack() {
+ GenericArgKind::Lifetime(lt) => write!(f, "{:?}", &this.wrap(lt)),
+ GenericArgKind::Const(ct) => write!(f, "{:?}", &this.wrap(ct)),
+ GenericArgKind::Type(ty) => write!(f, "{:?}", &this.wrap(ty)),
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for Region<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self.kind())
+ }
+}
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for Region<'tcx> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ write!(f, "{:?}", &this.map(|data| data.kind()))
+ }
+}
+
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for ty::RegionVid {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ match this.infcx.and_then(|infcx| infcx.universe_of_lt(*this.data)) {
+ Some(universe) => write!(f, "'?{}_{}", this.data.index(), universe.index()),
+ None => write!(f, "{:?}", this.data),
+ }
+ }
+}
+
+impl<'tcx, T: DebugWithInfcx<TyCtxt<'tcx>>> DebugWithInfcx<TyCtxt<'tcx>> for ty::Binder<'tcx, T> {
+ fn fmt<InfCtx: InferCtxtLike<TyCtxt<'tcx>>>(
+ this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ f.debug_tuple("Binder")
+ .field(&this.map(|data| data.as_ref().skip_binder()))
+ .field(&this.data.bound_vars())
+ .finish()
+ }
+}
+
///////////////////////////////////////////////////////////////////////////
// Atomic structs
//
@@ -273,7 +438,6 @@ CloneLiftImpls! {
(),
bool,
usize,
- u8,
u16,
u32,
u64,
@@ -303,10 +467,8 @@ TrivialTypeTraversalAndLiftImpls! {
::rustc_hir::Unsafety,
::rustc_target::asm::InlineAsmRegOrRegClass,
::rustc_target::spec::abi::Abi,
- crate::mir::coverage::ExpressionOperandId,
- crate::mir::coverage::CounterValueReference,
- crate::mir::coverage::InjectedExpressionId,
- crate::mir::coverage::InjectedExpressionIndex,
+ crate::mir::coverage::CounterId,
+ crate::mir::coverage::ExpressionId,
crate::mir::coverage::MappedExpressionIndex,
crate::mir::Local,
crate::mir::Promoted,
@@ -435,7 +597,7 @@ impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> {
type Lifted = ty::ParamEnv<'tcx>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
tcx.lift(self.caller_bounds())
- .map(|caller_bounds| ty::ParamEnv::new(caller_bounds, self.reveal(), self.constness()))
+ .map(|caller_bounds| ty::ParamEnv::new(caller_bounds, self.reveal()))
}
}
@@ -528,26 +690,26 @@ impl<'tcx> TypeSuperFoldable<TyCtxt<'tcx>> for Ty<'tcx> {
ty::RawPtr(tm) => ty::RawPtr(tm.try_fold_with(folder)?),
ty::Array(typ, sz) => ty::Array(typ.try_fold_with(folder)?, sz.try_fold_with(folder)?),
ty::Slice(typ) => ty::Slice(typ.try_fold_with(folder)?),
- ty::Adt(tid, substs) => ty::Adt(tid, substs.try_fold_with(folder)?),
+ ty::Adt(tid, args) => ty::Adt(tid, args.try_fold_with(folder)?),
ty::Dynamic(trait_ty, region, representation) => ty::Dynamic(
trait_ty.try_fold_with(folder)?,
region.try_fold_with(folder)?,
representation,
),
ty::Tuple(ts) => ty::Tuple(ts.try_fold_with(folder)?),
- ty::FnDef(def_id, substs) => ty::FnDef(def_id, substs.try_fold_with(folder)?),
+ ty::FnDef(def_id, args) => ty::FnDef(def_id, args.try_fold_with(folder)?),
ty::FnPtr(f) => ty::FnPtr(f.try_fold_with(folder)?),
ty::Ref(r, ty, mutbl) => {
ty::Ref(r.try_fold_with(folder)?, ty.try_fold_with(folder)?, mutbl)
}
- ty::Generator(did, substs, movability) => {
- ty::Generator(did, substs.try_fold_with(folder)?, movability)
+ ty::Generator(did, args, movability) => {
+ ty::Generator(did, args.try_fold_with(folder)?, movability)
}
ty::GeneratorWitness(types) => ty::GeneratorWitness(types.try_fold_with(folder)?),
- ty::GeneratorWitnessMIR(did, substs) => {
- ty::GeneratorWitnessMIR(did, substs.try_fold_with(folder)?)
+ ty::GeneratorWitnessMIR(did, args) => {
+ ty::GeneratorWitnessMIR(did, args.try_fold_with(folder)?)
}
- ty::Closure(did, substs) => ty::Closure(did, substs.try_fold_with(folder)?),
+ ty::Closure(did, args) => ty::Closure(did, args.try_fold_with(folder)?),
ty::Alias(kind, data) => ty::Alias(kind, data.try_fold_with(folder)?),
ty::Bool
@@ -581,22 +743,22 @@ impl<'tcx> TypeSuperVisitable<TyCtxt<'tcx>> for Ty<'tcx> {
sz.visit_with(visitor)
}
ty::Slice(typ) => typ.visit_with(visitor),
- ty::Adt(_, substs) => substs.visit_with(visitor),
+ ty::Adt(_, args) => args.visit_with(visitor),
ty::Dynamic(ref trait_ty, ref reg, _) => {
trait_ty.visit_with(visitor)?;
reg.visit_with(visitor)
}
ty::Tuple(ts) => ts.visit_with(visitor),
- ty::FnDef(_, substs) => substs.visit_with(visitor),
+ ty::FnDef(_, args) => args.visit_with(visitor),
ty::FnPtr(ref f) => f.visit_with(visitor),
ty::Ref(r, ty, _) => {
r.visit_with(visitor)?;
ty.visit_with(visitor)
}
- ty::Generator(_did, ref substs, _) => substs.visit_with(visitor),
+ ty::Generator(_did, ref args, _) => args.visit_with(visitor),
ty::GeneratorWitness(ref types) => types.visit_with(visitor),
- ty::GeneratorWitnessMIR(_did, ref substs) => substs.visit_with(visitor),
- ty::Closure(_did, ref substs) => substs.visit_with(visitor),
+ ty::GeneratorWitnessMIR(_did, ref args) => args.visit_with(visitor),
+ ty::Closure(_did, ref args) => args.visit_with(visitor),
ty::Alias(_, ref data) => data.visit_with(visitor),
ty::Bool
@@ -776,7 +938,7 @@ impl<'tcx> TypeSuperVisitable<TyCtxt<'tcx>> for ty::UnevaluatedConst<'tcx> {
&self,
visitor: &mut V,
) -> ControlFlow<V::BreakTy> {
- self.substs.visit_with(visitor)
+ self.args.visit_with(visitor)
}
}
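The new `DebugWithInfcx` impls above all follow one pattern: the value is wrapped together with an optional inference context (`OptWithInfcx::new_no_ctx(self).fmt(f)` in the plain `Debug` impls), and nested fields are re-wrapped with `this.wrap(..)` so the same context flows down to them. Below is a simplified, standalone sketch of that wrapper pattern; `Ctx`, `WithCtx`, `InferVar`, and `Pair` are hypothetical names, not the real `rustc_type_ir` types.

```rust
use std::fmt;

// The "inference context": here it only knows a universe index.
struct Ctx {
    universe: u32,
}

// Wrapper carrying a value plus an optional context, like `OptWithInfcx`.
struct WithCtx<'a, T> {
    ctx: Option<&'a Ctx>,
    data: &'a T,
}

impl<'a, T> WithCtx<'a, T> {
    fn new_no_ctx(data: &'a T) -> Self {
        WithCtx { ctx: None, data }
    }

    // Re-wrap a sub-component so it is printed with the same context.
    fn wrap<'b, U>(&self, data: &'b U) -> WithCtx<'b, U>
    where
        'a: 'b,
    {
        WithCtx { ctx: self.ctx, data }
    }
}

struct InferVar(u32);
struct Pair(InferVar, InferVar);

impl fmt::Debug for WithCtx<'_, InferVar> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.ctx {
            // With a context available we can show the universe, as the real impls do.
            Some(ctx) => write!(f, "?{}_{}", self.data.0, ctx.universe),
            None => write!(f, "?{}", self.data.0),
        }
    }
}

impl fmt::Debug for WithCtx<'_, Pair> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Fields are re-wrapped, mirroring `this.wrap(..)` in the patch above.
        write!(f, "({:?}, {:?})", self.wrap(&self.data.0), self.wrap(&self.data.1))
    }
}

fn main() {
    let pair = Pair(InferVar(3), InferVar(4));
    println!("{:?}", WithCtx::new_no_ctx(&pair)); // (?3, ?4)
    let ctx = Ctx { universe: 1 };
    println!("{:?}", WithCtx { ctx: Some(&ctx), data: &pair }); // (?3_1, ?4_1)
}
```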
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
index 94746fbdc..0291cdd6c 100644
--- a/compiler/rustc_middle/src/ty/sty.rs
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -3,19 +3,18 @@
#![allow(rustc::usage_of_ty_tykind)]
use crate::infer::canonical::Canonical;
-use crate::ty::subst::{GenericArg, InternalSubsts, SubstsRef};
use crate::ty::visit::ValidateBoundVars;
use crate::ty::InferTy::*;
use crate::ty::{
self, AdtDef, Discr, Term, Ty, TyCtxt, TypeFlags, TypeSuperVisitable, TypeVisitable,
TypeVisitableExt, TypeVisitor,
};
+use crate::ty::{GenericArg, GenericArgs, GenericArgsRef};
use crate::ty::{List, ParamEnv};
use hir::def::DefKind;
use polonius_engine::Atom;
use rustc_data_structures::captures::Captures;
use rustc_data_structures::intern::Interned;
-use rustc_error_messages::DiagnosticMessage;
use rustc_errors::{DiagnosticArgValue, ErrorGuaranteed, IntoDiagnosticArg, MultiSpan};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
@@ -35,9 +34,12 @@ use std::ops::{ControlFlow, Deref, Range};
use ty::util::IntTypeExt;
use rustc_type_ir::sty::TyKind::*;
+use rustc_type_ir::CollectAndApply;
+use rustc_type_ir::ConstKind as IrConstKind;
+use rustc_type_ir::DebugWithInfcx;
+use rustc_type_ir::DynKind;
+use rustc_type_ir::RegionKind as IrRegionKind;
use rustc_type_ir::TyKind as IrTyKind;
-use rustc_type_ir::{CollectAndApply, ConstKind as IrConstKind};
-use rustc_type_ir::{DynKind, RegionKind as IrRegionKind};
use super::GenericParamDefKind;
@@ -215,7 +217,7 @@ impl<'tcx> Article for TyKind<'tcx> {
///
/// ## Generators
///
-/// Generators are handled similarly in `GeneratorSubsts`. The set of
+/// Generators are handled similarly in `GeneratorArgs`. The set of
/// type parameters is similar, but `CK` and `CS` are replaced by the
/// following type parameters:
///
@@ -228,33 +230,30 @@ impl<'tcx> Article for TyKind<'tcx> {
/// completion of the generator.
/// * `GW`: The "generator witness".
#[derive(Copy, Clone, PartialEq, Eq, Debug, TypeFoldable, TypeVisitable, Lift)]
-pub struct ClosureSubsts<'tcx> {
+pub struct ClosureArgs<'tcx> {
/// Lifetime and type parameters from the enclosing function,
/// concatenated with a tuple containing the types of the upvars.
///
/// These are separated out because codegen wants to pass them around
/// when monomorphizing.
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
}
/// Struct returned by `split()`.
-pub struct ClosureSubstsParts<'tcx, T> {
- pub parent_substs: &'tcx [GenericArg<'tcx>],
+pub struct ClosureArgsParts<'tcx, T> {
+ pub parent_args: &'tcx [GenericArg<'tcx>],
pub closure_kind_ty: T,
pub closure_sig_as_fn_ptr_ty: T,
pub tupled_upvars_ty: T,
}
-impl<'tcx> ClosureSubsts<'tcx> {
- /// Construct `ClosureSubsts` from `ClosureSubstsParts`, containing `Substs`
+impl<'tcx> ClosureArgs<'tcx> {
+ /// Construct `ClosureArgs` from `ClosureArgsParts`, containing `GenericArgs`
/// for the closure parent, alongside additional closure-specific components.
- pub fn new(
- tcx: TyCtxt<'tcx>,
- parts: ClosureSubstsParts<'tcx, Ty<'tcx>>,
- ) -> ClosureSubsts<'tcx> {
- ClosureSubsts {
- substs: tcx.mk_substs_from_iter(
- parts.parent_substs.iter().copied().chain(
+ pub fn new(tcx: TyCtxt<'tcx>, parts: ClosureArgsParts<'tcx, Ty<'tcx>>) -> ClosureArgs<'tcx> {
+ ClosureArgs {
+ args: tcx.mk_args_from_iter(
+ parts.parent_args.iter().copied().chain(
[parts.closure_kind_ty, parts.closure_sig_as_fn_ptr_ty, parts.tupled_upvars_ty]
.iter()
.map(|&ty| ty.into()),
@@ -263,53 +262,47 @@ impl<'tcx> ClosureSubsts<'tcx> {
}
}
- /// Divides the closure substs into their respective components.
- /// The ordering assumed here must match that used by `ClosureSubsts::new` above.
- fn split(self) -> ClosureSubstsParts<'tcx, GenericArg<'tcx>> {
- match self.substs[..] {
- [
- ref parent_substs @ ..,
- closure_kind_ty,
- closure_sig_as_fn_ptr_ty,
- tupled_upvars_ty,
- ] => ClosureSubstsParts {
- parent_substs,
- closure_kind_ty,
- closure_sig_as_fn_ptr_ty,
- tupled_upvars_ty,
- },
- _ => bug!("closure substs missing synthetics"),
+ /// Divides the closure args into their respective components.
+ /// The ordering assumed here must match that used by `ClosureArgs::new` above.
+ fn split(self) -> ClosureArgsParts<'tcx, GenericArg<'tcx>> {
+ match self.args[..] {
+ [ref parent_args @ .., closure_kind_ty, closure_sig_as_fn_ptr_ty, tupled_upvars_ty] => {
+ ClosureArgsParts {
+ parent_args,
+ closure_kind_ty,
+ closure_sig_as_fn_ptr_ty,
+ tupled_upvars_ty,
+ }
+ }
+ _ => bug!("closure args missing synthetics"),
}
}
/// Returns `true` only if enough of the synthetic types are known to
- /// allow using all of the methods on `ClosureSubsts` without panicking.
+ /// allow using all of the methods on `ClosureArgs` without panicking.
///
/// Used primarily by `ty::print::pretty` to be able to handle closure
/// types that haven't had their synthetic types substituted in.
pub fn is_valid(self) -> bool {
- self.substs.len() >= 3
- && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_))
+ self.args.len() >= 3 && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_))
}
/// Returns the substitutions of the closure's parent.
- pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
- self.split().parent_substs
+ pub fn parent_args(self) -> &'tcx [GenericArg<'tcx>] {
+ self.split().parent_args
}
/// Returns an iterator over the list of types of captured paths by the closure.
/// In case there was a type error in figuring out the types of the captured path, an
/// empty iterator is returned.
#[inline]
- pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ pub fn upvar_tys(self) -> &'tcx List<Ty<'tcx>> {
match self.tupled_upvars_ty().kind() {
- TyKind::Error(_) => None,
- TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+ TyKind::Error(_) => ty::List::empty(),
+ TyKind::Tuple(..) => self.tupled_upvars_ty().tuple_fields(),
TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
}
- .into_iter()
- .flatten()
}
/// Returns the tuple type representing the upvars for this closure.
@@ -320,7 +313,7 @@ impl<'tcx> ClosureSubsts<'tcx> {
/// Returns the closure kind for this closure; may return a type
/// variable during inference. To get the closure kind during
- /// inference, use `infcx.closure_kind(substs)`.
+ /// inference, use `infcx.closure_kind(args)`.
pub fn kind_ty(self) -> Ty<'tcx> {
self.split().closure_kind_ty.expect_ty()
}
@@ -328,7 +321,7 @@ impl<'tcx> ClosureSubsts<'tcx> {
/// Returns the `fn` pointer type representing the closure signature for this
/// closure.
// FIXME(eddyb) this should be unnecessary, as the shallowly resolved
- // type is known at the time of the creation of `ClosureSubsts`,
+ // type is known at the time of the creation of `ClosureArgs`,
// see `rustc_hir_analysis::check::closure`.
pub fn sig_as_fn_ptr_ty(self) -> Ty<'tcx> {
self.split().closure_sig_as_fn_ptr_ty.expect_ty()
@@ -357,14 +350,14 @@ impl<'tcx> ClosureSubsts<'tcx> {
}
}
-/// Similar to `ClosureSubsts`; see the above documentation for more.
+/// Similar to `ClosureArgs`; see the above documentation for more.
#[derive(Copy, Clone, PartialEq, Eq, Debug, TypeFoldable, TypeVisitable, Lift)]
-pub struct GeneratorSubsts<'tcx> {
- pub substs: SubstsRef<'tcx>,
+pub struct GeneratorArgs<'tcx> {
+ pub args: GenericArgsRef<'tcx>,
}
-pub struct GeneratorSubstsParts<'tcx, T> {
- pub parent_substs: &'tcx [GenericArg<'tcx>],
+pub struct GeneratorArgsParts<'tcx, T> {
+ pub parent_args: &'tcx [GenericArg<'tcx>],
pub resume_ty: T,
pub yield_ty: T,
pub return_ty: T,
@@ -372,16 +365,16 @@ pub struct GeneratorSubstsParts<'tcx, T> {
pub tupled_upvars_ty: T,
}
-impl<'tcx> GeneratorSubsts<'tcx> {
- /// Construct `GeneratorSubsts` from `GeneratorSubstsParts`, containing `Substs`
+impl<'tcx> GeneratorArgs<'tcx> {
+ /// Construct `GeneratorArgs` from `GeneratorArgsParts`, containing `GenericArgs`
/// for the generator parent, alongside additional generator-specific components.
pub fn new(
tcx: TyCtxt<'tcx>,
- parts: GeneratorSubstsParts<'tcx, Ty<'tcx>>,
- ) -> GeneratorSubsts<'tcx> {
- GeneratorSubsts {
- substs: tcx.mk_substs_from_iter(
- parts.parent_substs.iter().copied().chain(
+ parts: GeneratorArgsParts<'tcx, Ty<'tcx>>,
+ ) -> GeneratorArgs<'tcx> {
+ GeneratorArgs {
+ args: tcx.mk_args_from_iter(
+ parts.parent_args.iter().copied().chain(
[
parts.resume_ty,
parts.yield_ty,
@@ -396,13 +389,13 @@ impl<'tcx> GeneratorSubsts<'tcx> {
}
}
- /// Divides the generator substs into their respective components.
- /// The ordering assumed here must match that used by `GeneratorSubsts::new` above.
- fn split(self) -> GeneratorSubstsParts<'tcx, GenericArg<'tcx>> {
- match self.substs[..] {
- [ref parent_substs @ .., resume_ty, yield_ty, return_ty, witness, tupled_upvars_ty] => {
- GeneratorSubstsParts {
- parent_substs,
+ /// Divides the generator args into their respective components.
+ /// The ordering assumed here must match that used by `GeneratorArgs::new` above.
+ fn split(self) -> GeneratorArgsParts<'tcx, GenericArg<'tcx>> {
+ match self.args[..] {
+ [ref parent_args @ .., resume_ty, yield_ty, return_ty, witness, tupled_upvars_ty] => {
+ GeneratorArgsParts {
+ parent_args,
resume_ty,
yield_ty,
return_ty,
@@ -410,23 +403,22 @@ impl<'tcx> GeneratorSubsts<'tcx> {
tupled_upvars_ty,
}
}
- _ => bug!("generator substs missing synthetics"),
+ _ => bug!("generator args missing synthetics"),
}
}
/// Returns `true` only if enough of the synthetic types are known to
- /// allow using all of the methods on `GeneratorSubsts` without panicking.
+ /// allow using all of the methods on `GeneratorArgs` without panicking.
///
/// Used primarily by `ty::print::pretty` to be able to handle generator
/// types that haven't had their synthetic types substituted in.
pub fn is_valid(self) -> bool {
- self.substs.len() >= 5
- && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_))
+ self.args.len() >= 5 && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_))
}
/// Returns the substitutions of the generator's parent.
- pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
- self.split().parent_substs
+ pub fn parent_args(self) -> &'tcx [GenericArg<'tcx>] {
+ self.split().parent_args
}
/// This describes the types that can be contained in a generator.
@@ -442,15 +434,13 @@ impl<'tcx> GeneratorSubsts<'tcx> {
/// In case there was a type error in figuring out the types of the captured path, an
/// empty iterator is returned.
#[inline]
- pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ pub fn upvar_tys(self) -> &'tcx List<Ty<'tcx>> {
match self.tupled_upvars_ty().kind() {
- TyKind::Error(_) => None,
- TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+ TyKind::Error(_) => ty::List::empty(),
+ TyKind::Tuple(..) => self.tupled_upvars_ty().tuple_fields(),
TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
}
- .into_iter()
- .flatten()
}
/// Returns the tuple type representing the upvars for this generator.
@@ -495,7 +485,7 @@ impl<'tcx> GeneratorSubsts<'tcx> {
}
}
-impl<'tcx> GeneratorSubsts<'tcx> {
+impl<'tcx> GeneratorArgs<'tcx> {
/// Generator has not been resumed yet.
pub const UNRESUMED: usize = 0;
/// Generator has returned or is completed.
@@ -574,7 +564,7 @@ impl<'tcx> GeneratorSubsts<'tcx> {
let layout = tcx.generator_layout(def_id).unwrap();
layout.variant_fields.iter().map(move |variant| {
variant.iter().map(move |field| {
- ty::EarlyBinder::bind(layout.field_tys[*field].ty).subst(tcx, self.substs)
+ ty::EarlyBinder::bind(layout.field_tys[*field].ty).instantiate(tcx, self.args)
})
})
}
@@ -582,43 +572,41 @@ impl<'tcx> GeneratorSubsts<'tcx> {
/// This is the types of the fields of a generator which are not stored in a
/// variant.
#[inline]
- pub fn prefix_tys(self) -> impl Iterator<Item = Ty<'tcx>> {
+ pub fn prefix_tys(self) -> &'tcx List<Ty<'tcx>> {
self.upvar_tys()
}
}
#[derive(Debug, Copy, Clone, HashStable)]
-pub enum UpvarSubsts<'tcx> {
- Closure(SubstsRef<'tcx>),
- Generator(SubstsRef<'tcx>),
+pub enum UpvarArgs<'tcx> {
+ Closure(GenericArgsRef<'tcx>),
+ Generator(GenericArgsRef<'tcx>),
}
-impl<'tcx> UpvarSubsts<'tcx> {
+impl<'tcx> UpvarArgs<'tcx> {
/// Returns an iterator over the list of types of captured paths by the closure/generator.
/// In case there was a type error in figuring out the types of the captured path, an
/// empty iterator is returned.
#[inline]
- pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ pub fn upvar_tys(self) -> &'tcx List<Ty<'tcx>> {
let tupled_tys = match self {
- UpvarSubsts::Closure(substs) => substs.as_closure().tupled_upvars_ty(),
- UpvarSubsts::Generator(substs) => substs.as_generator().tupled_upvars_ty(),
+ UpvarArgs::Closure(args) => args.as_closure().tupled_upvars_ty(),
+ UpvarArgs::Generator(args) => args.as_generator().tupled_upvars_ty(),
};
match tupled_tys.kind() {
- TyKind::Error(_) => None,
- TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+ TyKind::Error(_) => ty::List::empty(),
+ TyKind::Tuple(..) => self.tupled_upvars_ty().tuple_fields(),
TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
}
- .into_iter()
- .flatten()
}
#[inline]
pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
match self {
- UpvarSubsts::Closure(substs) => substs.as_closure().tupled_upvars_ty(),
- UpvarSubsts::Generator(substs) => substs.as_generator().tupled_upvars_ty(),
+ UpvarArgs::Closure(args) => args.as_closure().tupled_upvars_ty(),
+ UpvarArgs::Generator(args) => args.as_generator().tupled_upvars_ty(),
}
}
}
@@ -635,46 +623,46 @@ impl<'tcx> UpvarSubsts<'tcx> {
///
/// When the inline const is instantiated, `R` is substituted as the actual inferred
/// type of the constant. The reason that `R` is represented as an extra type parameter
-/// is the same reason that [`ClosureSubsts`] have `CS` and `U` as type parameters:
+/// is the same reason that [`ClosureArgs`] has `CS` and `U` as type parameters:
/// inline const can reference lifetimes that are internal to the creating function.
#[derive(Copy, Clone, Debug)]
-pub struct InlineConstSubsts<'tcx> {
+pub struct InlineConstArgs<'tcx> {
/// Generic parameters from the enclosing item,
/// concatenated with the inferred type of the constant.
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
}
/// Struct returned by `split()`.
-pub struct InlineConstSubstsParts<'tcx, T> {
- pub parent_substs: &'tcx [GenericArg<'tcx>],
+pub struct InlineConstArgsParts<'tcx, T> {
+ pub parent_args: &'tcx [GenericArg<'tcx>],
pub ty: T,
}
-impl<'tcx> InlineConstSubsts<'tcx> {
- /// Construct `InlineConstSubsts` from `InlineConstSubstsParts`.
+impl<'tcx> InlineConstArgs<'tcx> {
+ /// Construct `InlineConstArgs` from `InlineConstArgsParts`.
pub fn new(
tcx: TyCtxt<'tcx>,
- parts: InlineConstSubstsParts<'tcx, Ty<'tcx>>,
- ) -> InlineConstSubsts<'tcx> {
- InlineConstSubsts {
- substs: tcx.mk_substs_from_iter(
- parts.parent_substs.iter().copied().chain(std::iter::once(parts.ty.into())),
+ parts: InlineConstArgsParts<'tcx, Ty<'tcx>>,
+ ) -> InlineConstArgs<'tcx> {
+ InlineConstArgs {
+ args: tcx.mk_args_from_iter(
+ parts.parent_args.iter().copied().chain(std::iter::once(parts.ty.into())),
),
}
}
- /// Divides the inline const substs into their respective components.
- /// The ordering assumed here must match that used by `InlineConstSubsts::new` above.
- fn split(self) -> InlineConstSubstsParts<'tcx, GenericArg<'tcx>> {
- match self.substs[..] {
- [ref parent_substs @ .., ty] => InlineConstSubstsParts { parent_substs, ty },
- _ => bug!("inline const substs missing synthetics"),
+ /// Divides the inline const args into their respective components.
+ /// The ordering assumed here must match that used by `InlineConstArgs::new` above.
+ fn split(self) -> InlineConstArgsParts<'tcx, GenericArg<'tcx>> {
+ match self.args[..] {
+ [ref parent_args @ .., ty] => InlineConstArgsParts { parent_args, ty },
+ _ => bug!("inline const args missing synthetics"),
}
}
/// Returns the substitutions of the inline const's parent.
- pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
- self.split().parent_substs
+ pub fn parent_args(self) -> &'tcx [GenericArg<'tcx>] {
+ self.split().parent_args
}
/// Returns the type of this inline const.
@@ -694,6 +682,15 @@ pub enum ExistentialPredicate<'tcx> {
AutoTrait(DefId),
}
+impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for ExistentialPredicate<'tcx> {
+ fn fmt<InfCtx: rustc_type_ir::InferCtxtLike<TyCtxt<'tcx>>>(
+ this: rustc_type_ir::OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ fmt::Debug::fmt(&this.data, f)
+ }
+}
+
impl<'tcx> ExistentialPredicate<'tcx> {
/// Compares via an ordering that will not change if modules are reordered or other changes are
/// made to the tree. In particular, this ordering is preserved across incremental compilations.
@@ -725,7 +722,7 @@ impl<'tcx> PolyExistentialPredicate<'tcx> {
use crate::ty::ToPredicate;
match self.skip_binder() {
ExistentialPredicate::Trait(tr) => {
- self.rebind(tr).with_self_ty(tcx, self_ty).without_const().to_predicate(tcx)
+ self.rebind(tr).with_self_ty(tcx, self_ty).to_predicate(tcx)
}
ExistentialPredicate::Projection(p) => {
self.rebind(p.with_self_ty(tcx, self_ty)).to_predicate(tcx)
@@ -736,12 +733,11 @@ impl<'tcx> PolyExistentialPredicate<'tcx> {
ty::TraitRef::new(tcx, did, [self_ty])
} else {
// If this is an ill-formed auto trait, then synthesize
- // new error substs for the missing generics.
- let err_substs =
- ty::InternalSubsts::extend_with_error(tcx, did, &[self_ty.into()]);
- ty::TraitRef::new(tcx, did, err_substs)
+ // new error args for the missing generics.
+ let err_args = ty::GenericArgs::extend_with_error(tcx, did, &[self_ty.into()]);
+ ty::TraitRef::new(tcx, did, err_args)
};
- self.rebind(trait_ref).without_const().to_predicate(tcx)
+ self.rebind(trait_ref).to_predicate(tcx)
}
}
}
@@ -815,7 +811,7 @@ impl<'tcx> List<ty::PolyExistentialPredicate<'tcx>> {
/// T: Foo<U>
/// ```
/// This would be represented by a trait-reference where the `DefId` is the
-/// `DefId` for the trait `Foo` and the substs define `T` as parameter 0,
+/// `DefId` for the trait `Foo` and the args define `T` as parameter 0,
/// and `U` as parameter 1.
///
/// Trait references also appear in object types like `Foo<U>`, but in
@@ -824,7 +820,7 @@ impl<'tcx> List<ty::PolyExistentialPredicate<'tcx>> {
#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct TraitRef<'tcx> {
pub def_id: DefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
/// This field exists to prevent the creation of `TraitRef` without
/// calling [`TraitRef::new`].
pub(super) _use_trait_ref_new_instead: (),
@@ -834,60 +830,48 @@ impl<'tcx> TraitRef<'tcx> {
pub fn new(
tcx: TyCtxt<'tcx>,
trait_def_id: DefId,
- substs: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
+ args: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
) -> Self {
- let substs = tcx.check_and_mk_substs(trait_def_id, substs);
- Self { def_id: trait_def_id, substs, _use_trait_ref_new_instead: () }
+ let args = tcx.check_and_mk_args(trait_def_id, args);
+ Self { def_id: trait_def_id, args, _use_trait_ref_new_instead: () }
}
pub fn from_lang_item(
tcx: TyCtxt<'tcx>,
trait_lang_item: LangItem,
span: Span,
- substs: impl IntoIterator<Item: Into<ty::GenericArg<'tcx>>>,
+ args: impl IntoIterator<Item: Into<ty::GenericArg<'tcx>>>,
) -> Self {
let trait_def_id = tcx.require_lang_item(trait_lang_item, Some(span));
- Self::new(tcx, trait_def_id, substs)
+ Self::new(tcx, trait_def_id, args)
}
pub fn from_method(
tcx: TyCtxt<'tcx>,
trait_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> ty::TraitRef<'tcx> {
let defs = tcx.generics_of(trait_id);
- ty::TraitRef::new(tcx, trait_id, tcx.mk_substs(&substs[..defs.params.len()]))
+ ty::TraitRef::new(tcx, trait_id, tcx.mk_args(&args[..defs.params.len()]))
}
/// Returns a `TraitRef` of the form `P0: Foo<P1..Pn>` where `Pi`
/// are the parameters defined on trait.
pub fn identity(tcx: TyCtxt<'tcx>, def_id: DefId) -> TraitRef<'tcx> {
- ty::TraitRef::new(tcx, def_id, InternalSubsts::identity_for_item(tcx, def_id))
+ ty::TraitRef::new(tcx, def_id, GenericArgs::identity_for_item(tcx, def_id))
}
pub fn with_self_ty(self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> Self {
ty::TraitRef::new(
tcx,
self.def_id,
- [self_ty.into()].into_iter().chain(self.substs.iter().skip(1)),
+ [self_ty.into()].into_iter().chain(self.args.iter().skip(1)),
)
}
- /// Converts this trait ref to a trait predicate with a given `constness` and a positive polarity.
- #[inline]
- pub fn with_constness(self, constness: ty::BoundConstness) -> ty::TraitPredicate<'tcx> {
- ty::TraitPredicate { trait_ref: self, constness, polarity: ty::ImplPolarity::Positive }
- }
-
- /// Converts this trait ref to a trait predicate without `const` and a positive polarity.
- #[inline]
- pub fn without_const(self) -> ty::TraitPredicate<'tcx> {
- self.with_constness(ty::BoundConstness::NotConst)
- }
-
#[inline]
pub fn self_ty(&self) -> Ty<'tcx> {
- self.substs.type_at(0)
+ self.args.type_at(0)
}
}
@@ -920,7 +904,7 @@ impl<'tcx> IntoDiagnosticArg for TraitRef<'tcx> {
#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct ExistentialTraitRef<'tcx> {
pub def_id: DefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
}
impl<'tcx> ExistentialTraitRef<'tcx> {
@@ -929,11 +913,11 @@ impl<'tcx> ExistentialTraitRef<'tcx> {
trait_ref: ty::TraitRef<'tcx>,
) -> ty::ExistentialTraitRef<'tcx> {
// Assert there is a Self.
- trait_ref.substs.type_at(0);
+ trait_ref.args.type_at(0);
ty::ExistentialTraitRef {
def_id: trait_ref.def_id,
- substs: tcx.mk_substs(&trait_ref.substs[1..]),
+ args: tcx.mk_args(&trait_ref.args[1..]),
}
}
@@ -945,7 +929,7 @@ impl<'tcx> ExistentialTraitRef<'tcx> {
// otherwise the escaping vars would be captured by the binder
// debug_assert!(!self_ty.has_escaping_bound_vars());
- ty::TraitRef::new(tcx, self.def_id, [self_ty.into()].into_iter().chain(self.substs.iter()))
+ ty::TraitRef::new(tcx, self.def_id, [self_ty.into()].into_iter().chain(self.args.iter()))
}
}
@@ -1214,7 +1198,7 @@ pub struct AliasTy<'tcx> {
///
/// For RPIT the substitutions are for the generics of the function,
/// while for TAIT it is used for the generic parameters of the alias.
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
/// The `DefId` of the `TraitItem` or `ImplItem` for the associated type `N` depending on whether
/// this is a projection or an inherent projection or the `DefId` of the `OpaqueType` item if
@@ -1237,9 +1221,9 @@ impl<'tcx> AliasTy<'tcx> {
pub fn kind(self, tcx: TyCtxt<'tcx>) -> ty::AliasKind {
match tcx.def_kind(self.def_id) {
DefKind::AssocTy if let DefKind::Impl { of_trait: false } = tcx.def_kind(tcx.parent(self.def_id)) => ty::Inherent,
- DefKind::AssocTy | DefKind::ImplTraitPlaceholder => ty::Projection,
+ DefKind::AssocTy => ty::Projection,
DefKind::OpaqueTy => ty::Opaque,
- DefKind::TyAlias => ty::Weak,
+ DefKind::TyAlias { .. } => ty::Weak,
kind => bug!("unexpected DefKind in AliasTy: {kind:?}"),
}
}
@@ -1252,11 +1236,11 @@ impl<'tcx> AliasTy<'tcx> {
/// The following methods work only with associated type projections.
impl<'tcx> AliasTy<'tcx> {
pub fn self_ty(self) -> Ty<'tcx> {
- self.substs.type_at(0)
+ self.args.type_at(0)
}
pub fn with_self_ty(self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> Self {
- tcx.mk_alias_ty(self.def_id, [self_ty.into()].into_iter().chain(self.substs.iter().skip(1)))
+ tcx.mk_alias_ty(self.def_id, [self_ty.into()].into_iter().chain(self.args.iter().skip(1)))
}
}
@@ -1265,17 +1249,14 @@ impl<'tcx> AliasTy<'tcx> {
pub fn trait_def_id(self, tcx: TyCtxt<'tcx>) -> DefId {
match tcx.def_kind(self.def_id) {
DefKind::AssocTy | DefKind::AssocConst => tcx.parent(self.def_id),
- DefKind::ImplTraitPlaceholder => {
- tcx.parent(tcx.impl_trait_in_trait_parent_fn(self.def_id))
- }
kind => bug!("expected a projection AliasTy; found {kind:?}"),
}
}
- /// Extracts the underlying trait reference and own substs from this projection.
+ /// Extracts the underlying trait reference and own args from this projection.
/// For example, if this is a projection of `<T as StreamingIterator>::Item<'a>`,
- /// then this function would return a `T: StreamingIterator` trait reference and `['a]` as the own substs
- pub fn trait_ref_and_own_substs(
+ /// then this function would return a `T: StreamingIterator` trait reference and `['a]` as the own args
+ pub fn trait_ref_and_own_args(
self,
tcx: TyCtxt<'tcx>,
) -> (ty::TraitRef<'tcx>, &'tcx [ty::GenericArg<'tcx>]) {
@@ -1283,8 +1264,8 @@ impl<'tcx> AliasTy<'tcx> {
let trait_def_id = self.trait_def_id(tcx);
let trait_generics = tcx.generics_of(trait_def_id);
(
- ty::TraitRef::new(tcx, trait_def_id, self.substs.truncate_to(tcx, trait_generics)),
- &self.substs[trait_generics.count()..],
+ ty::TraitRef::new(tcx, trait_def_id, self.args.truncate_to(tcx, trait_generics)),
+ &self.args[trait_generics.count()..],
)
}
@@ -1292,18 +1273,18 @@ impl<'tcx> AliasTy<'tcx> {
/// For example, if this is a projection of `<T as Iterator>::Item`,
/// then this function would return a `T: Iterator` trait reference.
///
- /// WARNING: This will drop the substs for generic associated types
- /// consider calling [Self::trait_ref_and_own_substs] to get those
+ /// WARNING: This will drop the args for generic associated types
+ /// consider calling [Self::trait_ref_and_own_args] to get those
/// as well.
pub fn trait_ref(self, tcx: TyCtxt<'tcx>) -> ty::TraitRef<'tcx> {
let def_id = self.trait_def_id(tcx);
- ty::TraitRef::new(tcx, def_id, self.substs.truncate_to(tcx, tcx.generics_of(def_id)))
+ ty::TraitRef::new(tcx, def_id, self.args.truncate_to(tcx, tcx.generics_of(def_id)))
}
}
/// The following methods work only with inherent associated type projections.
impl<'tcx> AliasTy<'tcx> {
- /// Transform the substitutions to have the given `impl` substs as the base and the GAT substs on top of that.
+ /// Transform the substitutions to have the given `impl` args as the base and the GAT args on top of that.
///
/// Does the following transformation:
///
@@ -1313,14 +1294,14 @@ impl<'tcx> AliasTy<'tcx> {
/// I_i impl subst
/// P_j GAT subst
/// ```
- pub fn rebase_substs_onto_impl(
+ pub fn rebase_inherent_args_onto_impl(
self,
- impl_substs: ty::SubstsRef<'tcx>,
+ impl_args: ty::GenericArgsRef<'tcx>,
tcx: TyCtxt<'tcx>,
- ) -> ty::SubstsRef<'tcx> {
+ ) -> ty::GenericArgsRef<'tcx> {
debug_assert_eq!(self.kind(tcx), ty::Inherent);
- tcx.mk_substs_from_iter(impl_substs.into_iter().chain(self.substs.into_iter().skip(1)))
+ tcx.mk_args_from_iter(impl_args.into_iter().chain(self.args.into_iter().skip(1)))
}
}
@@ -1574,12 +1555,6 @@ impl<'tcx> Deref for Region<'tcx> {
}
}
-impl<'tcx> fmt::Debug for Region<'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "{:?}", self.kind())
- }
-}
-
#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, PartialOrd, Ord)]
#[derive(HashStable)]
pub struct EarlyBoundRegion {
@@ -1646,7 +1621,7 @@ impl From<BoundVar> for BoundTy {
#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct ExistentialProjection<'tcx> {
pub def_id: DefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
pub term: Term<'tcx>,
}
@@ -1660,8 +1635,8 @@ impl<'tcx> ExistentialProjection<'tcx> {
pub fn trait_ref(&self, tcx: TyCtxt<'tcx>) -> ty::ExistentialTraitRef<'tcx> {
let def_id = tcx.parent(self.def_id);
let subst_count = tcx.generics_of(def_id).count() - 1;
- let substs = tcx.mk_substs(&self.substs[..subst_count]);
- ty::ExistentialTraitRef { def_id, substs }
+ let args = tcx.mk_args(&self.args[..subst_count]);
+ ty::ExistentialTraitRef { def_id, args }
}
pub fn with_self_ty(
@@ -1674,7 +1649,7 @@ impl<'tcx> ExistentialProjection<'tcx> {
ty::ProjectionPredicate {
projection_ty: tcx
- .mk_alias_ty(self.def_id, [self_ty.into()].into_iter().chain(self.substs)),
+ .mk_alias_ty(self.def_id, [self_ty.into()].into_iter().chain(self.args)),
term: self.term,
}
}
@@ -1684,11 +1659,11 @@ impl<'tcx> ExistentialProjection<'tcx> {
projection_predicate: ty::ProjectionPredicate<'tcx>,
) -> Self {
// Assert there is a Self.
- projection_predicate.projection_ty.substs.type_at(0);
+ projection_predicate.projection_ty.args.type_at(0);
Self {
def_id: projection_predicate.projection_ty.def_id,
- substs: tcx.mk_substs(&projection_predicate.projection_ty.substs[1..]),
+ args: tcx.mk_args(&projection_predicate.projection_ty.args[1..]),
term: projection_predicate.term,
}
}
@@ -1970,15 +1945,14 @@ impl<'tcx> Ty<'tcx> {
(kind, tcx.def_kind(alias_ty.def_id)),
(ty::Opaque, DefKind::OpaqueTy)
| (ty::Projection | ty::Inherent, DefKind::AssocTy)
- | (ty::Opaque | ty::Projection, DefKind::ImplTraitPlaceholder)
- | (ty::Weak, DefKind::TyAlias)
+ | (ty::Weak, DefKind::TyAlias { .. })
);
Ty::new(tcx, Alias(kind, alias_ty))
}
#[inline]
- pub fn new_opaque(tcx: TyCtxt<'tcx>, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
- Ty::new_alias(tcx, ty::Opaque, tcx.mk_alias_ty(def_id, substs))
+ pub fn new_opaque(tcx: TyCtxt<'tcx>, def_id: DefId, args: GenericArgsRef<'tcx>) -> Ty<'tcx> {
+ Ty::new_alias(tcx, ty::Opaque, tcx.mk_alias_ty(def_id, args))
}
/// Constructs a `TyKind::Error` type with current `ErrorGuaranteed`
@@ -1998,7 +1972,7 @@ impl<'tcx> Ty<'tcx> {
pub fn new_error_with_message<S: Into<MultiSpan>>(
tcx: TyCtxt<'tcx>,
span: S,
- msg: impl Into<DiagnosticMessage>,
+ msg: impl Into<String>,
) -> Ty<'tcx> {
let reported = tcx.sess.delay_span_bug(span, msg);
Ty::new(tcx, Error(reported))
@@ -2070,8 +2044,8 @@ impl<'tcx> Ty<'tcx> {
}
#[inline]
- pub fn new_adt(tcx: TyCtxt<'tcx>, def: AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
- Ty::new(tcx, Adt(def, substs))
+ pub fn new_adt(tcx: TyCtxt<'tcx>, def: AdtDef<'tcx>, args: GenericArgsRef<'tcx>) -> Ty<'tcx> {
+ Ty::new(tcx, Adt(def, args))
}
#[inline]
@@ -2115,10 +2089,10 @@ impl<'tcx> Ty<'tcx> {
pub fn new_fn_def(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- substs: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
+ args: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
) -> Ty<'tcx> {
- let substs = tcx.check_and_mk_substs(def_id, substs);
- Ty::new(tcx, FnDef(def_id, substs))
+ let args = tcx.check_and_mk_args(def_id, args);
+ Ty::new(tcx, FnDef(def_id, args))
}
#[inline]
@@ -2140,38 +2114,38 @@ impl<'tcx> Ty<'tcx> {
pub fn new_projection(
tcx: TyCtxt<'tcx>,
item_def_id: DefId,
- substs: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
+ args: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
) -> Ty<'tcx> {
- Ty::new_alias(tcx, ty::Projection, tcx.mk_alias_ty(item_def_id, substs))
+ Ty::new_alias(tcx, ty::Projection, tcx.mk_alias_ty(item_def_id, args))
}
#[inline]
pub fn new_closure(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- closure_substs: SubstsRef<'tcx>,
+ closure_args: GenericArgsRef<'tcx>,
) -> Ty<'tcx> {
debug_assert_eq!(
- closure_substs.len(),
+ closure_args.len(),
tcx.generics_of(tcx.typeck_root_def_id(def_id)).count() + 3,
"closure constructed with incorrect substitutions"
);
- Ty::new(tcx, Closure(def_id, closure_substs))
+ Ty::new(tcx, Closure(def_id, closure_args))
}
#[inline]
pub fn new_generator(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- generator_substs: SubstsRef<'tcx>,
+ generator_args: GenericArgsRef<'tcx>,
movability: hir::Movability,
) -> Ty<'tcx> {
debug_assert_eq!(
- generator_substs.len(),
+ generator_args.len(),
tcx.generics_of(tcx.typeck_root_def_id(def_id)).count() + 5,
"generator constructed with incorrect number of substitutions"
);
- Ty::new(tcx, Generator(def_id, generator_substs, movability))
+ Ty::new(tcx, Generator(def_id, generator_args, movability))
}
#[inline]
@@ -2186,9 +2160,9 @@ impl<'tcx> Ty<'tcx> {
pub fn new_generator_witness_mir(
tcx: TyCtxt<'tcx>,
id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Ty<'tcx> {
- Ty::new(tcx, GeneratorWitnessMIR(id, substs))
+ Ty::new(tcx, GeneratorWitnessMIR(id, args))
}
// misc
@@ -2212,19 +2186,18 @@ impl<'tcx> Ty<'tcx> {
fn new_generic_adt(tcx: TyCtxt<'tcx>, wrapper_def_id: DefId, ty_param: Ty<'tcx>) -> Ty<'tcx> {
let adt_def = tcx.adt_def(wrapper_def_id);
- let substs =
- InternalSubsts::for_item(tcx, wrapper_def_id, |param, substs| match param.kind {
- GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => bug!(),
- GenericParamDefKind::Type { has_default, .. } => {
- if param.index == 0 {
- ty_param.into()
- } else {
- assert!(has_default);
- tcx.type_of(param.def_id).subst(tcx, substs).into()
- }
+ let args = GenericArgs::for_item(tcx, wrapper_def_id, |param, args| match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => bug!(),
+ GenericParamDefKind::Type { has_default, .. } => {
+ if param.index == 0 {
+ ty_param.into()
+ } else {
+ assert!(has_default);
+ tcx.type_of(param.def_id).instantiate(tcx, args).into()
}
- });
- Ty::new(tcx, Adt(adt_def, substs))
+ }
+ });
+ Ty::new(tcx, Adt(adt_def, args))
}
#[inline]
@@ -2255,8 +2228,8 @@ impl<'tcx> Ty<'tcx> {
pub fn new_task_context(tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
let context_did = tcx.require_lang_item(LangItem::Context, None);
let context_adt_ref = tcx.adt_def(context_did);
- let context_substs = tcx.mk_substs(&[tcx.lifetimes.re_erased.into()]);
- let context_ty = Ty::new_adt(tcx, context_adt_ref, context_substs);
+ let context_args = tcx.mk_args(&[tcx.lifetimes.re_erased.into()]);
+ let context_ty = Ty::new_adt(tcx, context_adt_ref, context_args);
Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, context_ty)
}
}
@@ -2380,10 +2353,10 @@ impl<'tcx> Ty<'tcx> {
pub fn simd_size_and_type(self, tcx: TyCtxt<'tcx>) -> (u64, Ty<'tcx>) {
match self.kind() {
- Adt(def, substs) => {
+ Adt(def, args) => {
assert!(def.repr().simd(), "`simd_size_and_type` called on non-SIMD type");
let variant = def.non_enum_variant();
- let f0_ty = variant.fields[FieldIdx::from_u32(0)].ty(tcx, substs);
+ let f0_ty = variant.fields[FieldIdx::from_u32(0)].ty(tcx, args);
match f0_ty.kind() {
// If the first field is an array, we assume it is the only field and its
@@ -2444,7 +2417,7 @@ impl<'tcx> Ty<'tcx> {
/// Panics if called on any type other than `Box<T>`.
pub fn boxed_ty(self) -> Ty<'tcx> {
match self.kind() {
- Adt(def, substs) if def.is_box() => substs.type_at(0),
+ Adt(def, args) if def.is_box() => args.type_at(0),
_ => bug!("`boxed_ty` is called on non-box type {:?}", self),
}
}
@@ -2608,14 +2581,14 @@ impl<'tcx> Ty<'tcx> {
pub fn fn_sig(self, tcx: TyCtxt<'tcx>) -> PolyFnSig<'tcx> {
match self.kind() {
- FnDef(def_id, substs) => tcx.fn_sig(*def_id).subst(tcx, substs),
+ FnDef(def_id, args) => tcx.fn_sig(*def_id).instantiate(tcx, args),
FnPtr(f) => *f,
Error(_) => {
// ignore errors (#54954)
ty::Binder::dummy(FnSig::fake())
}
Closure(..) => bug!(
- "to get the signature of a closure, use `substs.as_closure().sig()` not `fn_sig()`",
+ "to get the signature of a closure, use `args.as_closure().sig()` not `fn_sig()`",
),
_ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self),
}
@@ -2649,7 +2622,7 @@ impl<'tcx> Ty<'tcx> {
#[inline]
pub fn tuple_fields(self) -> &'tcx List<Ty<'tcx>> {
match self.kind() {
- Tuple(substs) => substs,
+ Tuple(args) => args,
_ => bug!("tuple_fields called on non-tuple"),
}
}
@@ -2661,8 +2634,8 @@ impl<'tcx> Ty<'tcx> {
pub fn variant_range(self, tcx: TyCtxt<'tcx>) -> Option<Range<VariantIdx>> {
match self.kind() {
TyKind::Adt(adt, _) => Some(adt.variant_range()),
- TyKind::Generator(def_id, substs, _) => {
- Some(substs.as_generator().variant_range(*def_id, tcx))
+ TyKind::Generator(def_id, args, _) => {
+ Some(args.as_generator().variant_range(*def_id, tcx))
}
_ => None,
}
@@ -2679,16 +2652,11 @@ impl<'tcx> Ty<'tcx> {
variant_index: VariantIdx,
) -> Option<Discr<'tcx>> {
match self.kind() {
- TyKind::Adt(adt, _) if adt.variants().is_empty() => {
- // This can actually happen during CTFE, see
- // https://github.com/rust-lang/rust/issues/89765.
- None
- }
TyKind::Adt(adt, _) if adt.is_enum() => {
Some(adt.discriminant_for_variant(tcx, variant_index))
}
- TyKind::Generator(def_id, substs, _) => {
- Some(substs.as_generator().discriminant_for_variant(*def_id, tcx, variant_index))
+ TyKind::Generator(def_id, args, _) => {
+ Some(args.as_generator().discriminant_for_variant(*def_id, tcx, variant_index))
}
_ => None,
}
@@ -2698,13 +2666,13 @@ impl<'tcx> Ty<'tcx> {
pub fn discriminant_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
match self.kind() {
ty::Adt(adt, _) if adt.is_enum() => adt.repr().discr_type().to_ty(tcx),
- ty::Generator(_, substs, _) => substs.as_generator().discr_ty(tcx),
+ ty::Generator(_, args, _) => args.as_generator().discr_ty(tcx),
ty::Param(_) | ty::Alias(..) | ty::Infer(ty::TyVar(_)) => {
let assoc_items = tcx.associated_item_def_ids(
tcx.require_lang_item(hir::LangItem::DiscriminantKind, None),
);
- Ty::new_projection(tcx, assoc_items[0], tcx.mk_substs(&[self.into()]))
+ Ty::new_projection(tcx, assoc_items[0], tcx.mk_args(&[self.into()]))
}
ty::Bool
@@ -2777,7 +2745,7 @@ impl<'tcx> Ty<'tcx> {
ty::Str | ty::Slice(_) => (tcx.types.usize, false),
ty::Dynamic(..) => {
let dyn_metadata = tcx.require_lang_item(LangItem::DynMetadata, None);
- (tcx.type_of(dyn_metadata).subst(tcx, &[tail.into()]), false)
+ (tcx.type_of(dyn_metadata).instantiate(tcx, &[tail.into()]), false)
},
// type parameters only have unit metadata if they're sized, so return true
@@ -2794,7 +2762,7 @@ impl<'tcx> Ty<'tcx> {
}
/// When we create a closure, we record its kind (i.e., what trait
- /// it implements) into its `ClosureSubsts` using a type
+ /// it implements) into its `ClosureArgs` using a type
/// parameter. This is kind of a phantom type, except that the
/// most convenient thing for us to use are the integral types. This
/// function converts such a special type into the closure
@@ -2857,13 +2825,13 @@ impl<'tcx> Ty<'tcx> {
ty::Tuple(tys) => tys.iter().all(|ty| ty.is_trivially_sized(tcx)),
- ty::Adt(def, _substs) => def.sized_constraint(tcx).skip_binder().is_empty(),
+ ty::Adt(def, _args) => def.sized_constraint(tcx).skip_binder().is_empty(),
- ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
+ ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) | ty::Bound(..) => false,
ty::Infer(ty::TyVar(_)) => false,
- ty::Bound(..) | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("`is_trivially_sized` applied to unexpected type: {:?}", self)
}
}
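
The hunks above are the heart of the `substs` -> `args` rename on the `Ty` constructors: `SubstsRef<'tcx>` becomes `GenericArgsRef<'tcx>`, `tcx.mk_substs`/`tcx.check_and_mk_substs` become `tcx.mk_args`/`tcx.check_and_mk_args`, and `InternalSubsts::for_item` becomes `GenericArgs::for_item`. A minimal before/after sketch of a call site (rustc-internal APIs, so this only compiles inside the compiler; the helper name is made up for illustration):

// Before: Ty::new_projection(tcx, assoc_item, tcx.mk_substs(&[self_ty.into()]))
fn discriminant_kind_projection<'tcx>(
    tcx: TyCtxt<'tcx>,
    assoc_item: DefId,
    self_ty: Ty<'tcx>,
) -> Ty<'tcx> {
    // Same constructor; the argument list is now a `GenericArgsRef<'tcx>`
    // built with the renamed interner method `mk_args`.
    Ty::new_projection(tcx, assoc_item, tcx.mk_args(&[self_ty.into()]))
}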
diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs
index 98c70e330..6e55e7915 100644
--- a/compiler/rustc_middle/src/ty/trait_def.rs
+++ b/compiler/rustc_middle/src/ty/trait_def.rs
@@ -236,7 +236,7 @@ pub(super) fn trait_impls_of_provider(tcx: TyCtxt<'_>, trait_id: DefId) -> Trait
for &impl_def_id in tcx.hir().trait_impls(trait_id) {
let impl_def_id = impl_def_id.to_def_id();
- let impl_self_ty = tcx.type_of(impl_def_id).subst_identity();
+ let impl_self_ty = tcx.type_of(impl_def_id).instantiate_identity();
if impl_self_ty.references_error() {
continue;
}
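
The `EarlyBinder` accessors used on query results such as `type_of` and `fn_sig` are renamed in lock-step with the hunk above. A rough old-to-new mapping (sketch; rustc-internal APIs):

// EarlyBinder<T> accessors after this change:
//   .subst_identity()    ->  .instantiate_identity()
//   .subst(tcx, substs)  ->  .instantiate(tcx, args)
//   .skip_binder()           (unchanged)
let impl_self_ty = tcx.type_of(impl_def_id).instantiate_identity();
let fn_sig = tcx.fn_sig(def_id).instantiate(tcx, args);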
diff --git a/compiler/rustc_middle/src/ty/typeck_results.rs b/compiler/rustc_middle/src/ty/typeck_results.rs
index 8cbffa148..327cd0a5d 100644
--- a/compiler/rustc_middle/src/ty/typeck_results.rs
+++ b/compiler/rustc_middle/src/ty/typeck_results.rs
@@ -4,13 +4,12 @@ use crate::{
traits::ObligationCause,
ty::{
self, tls, BindingMode, BoundVar, CanonicalPolyFnSig, ClosureSizeProfileData,
- GenericArgKind, InternalSubsts, SubstsRef, Ty, UserSubsts,
+ GenericArgKind, GenericArgs, GenericArgsRef, Ty, UserArgs,
},
};
use rustc_data_structures::{
- fx::{FxHashMap, FxIndexMap},
- sync::Lrc,
- unord::{UnordItems, UnordSet},
+ fx::FxIndexMap,
+ unord::{ExtendUnord, UnordItems, UnordSet},
};
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
@@ -54,7 +53,7 @@ pub struct TypeckResults<'tcx> {
/// of this node. This only applies to nodes that refer to entities
/// parameterized by type parameters, such as generic fns, types, or
/// other items.
- node_substs: ItemLocalMap<SubstsRef<'tcx>>,
+ node_args: ItemLocalMap<GenericArgsRef<'tcx>>,
/// This will either store the canonicalized types provided by the user
/// or the substitutions that the user explicitly gave (if any) attached
@@ -145,7 +144,7 @@ pub struct TypeckResults<'tcx> {
/// This is used for warning unused imports. During type
/// checking, this `Lrc` should not be cloned: it must have a ref-count
/// of 1 so that we can insert things into the set mutably.
- pub used_trait_imports: Lrc<UnordSet<LocalDefId>>,
+ pub used_trait_imports: UnordSet<LocalDefId>,
/// If any errors occurred while type-checking this body,
/// this field will be set to `Some(ErrorGuaranteed)`.
@@ -183,7 +182,7 @@ pub struct TypeckResults<'tcx> {
/// we never capture `t`. This becomes an issue when we build MIR as we require
/// information on `t` in order to create place `t.0` and `t.1`. We can solve this
/// issue by fake reading `t`.
- pub closure_fake_reads: FxHashMap<LocalDefId, Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>>,
+ pub closure_fake_reads: LocalDefIdMap<Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>>,
/// Tracks the rvalue scoping rules which defines finer scoping for rvalue expressions
/// by applying extended parameter rules.
@@ -197,7 +196,7 @@ pub struct TypeckResults<'tcx> {
/// Stores the predicates that apply on generator witness types.
/// formatting modified file tests/ui/generator/retain-resume-ref.rs
pub generator_interior_predicates:
- FxHashMap<LocalDefId, Vec<(ty::Predicate<'tcx>, ObligationCause<'tcx>)>>,
+ LocalDefIdMap<Vec<(ty::Predicate<'tcx>, ObligationCause<'tcx>)>>,
/// We sometimes treat byte string literals (which are of type `&[u8; N]`)
/// as `&[u8]`, depending on the pattern in which they are used.
@@ -207,7 +206,7 @@ pub struct TypeckResults<'tcx> {
/// Contains the data for evaluating the effect of feature `capture_disjoint_fields`
/// on closure size.
- pub closure_size_eval: FxHashMap<LocalDefId, ClosureSizeProfileData<'tcx>>,
+ pub closure_size_eval: LocalDefIdMap<ClosureSizeProfileData<'tcx>>,
/// Container types and field indices of `offset_of!` expressions
offset_of_data: ItemLocalMap<(Ty<'tcx>, Vec<FieldIdx>)>,
@@ -265,7 +264,7 @@ impl<'tcx> TypeckResults<'tcx> {
user_provided_types: Default::default(),
user_provided_sigs: Default::default(),
node_types: Default::default(),
- node_substs: Default::default(),
+ node_args: Default::default(),
adjustments: Default::default(),
pat_binding_modes: Default::default(),
pat_adjustments: Default::default(),
@@ -273,7 +272,7 @@ impl<'tcx> TypeckResults<'tcx> {
liberated_fn_sigs: Default::default(),
fru_field_types: Default::default(),
coercion_casts: Default::default(),
- used_trait_imports: Lrc::new(Default::default()),
+ used_trait_imports: Default::default(),
tainted_by_errors: None,
concrete_opaque_types: Default::default(),
closure_min_captures: Default::default(),
@@ -385,18 +384,18 @@ impl<'tcx> TypeckResults<'tcx> {
self.node_types.get(&id.local_id).cloned()
}
- pub fn node_substs_mut(&mut self) -> LocalTableInContextMut<'_, SubstsRef<'tcx>> {
- LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_substs }
+ pub fn node_args_mut(&mut self) -> LocalTableInContextMut<'_, GenericArgsRef<'tcx>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_args }
}
- pub fn node_substs(&self, id: hir::HirId) -> SubstsRef<'tcx> {
+ pub fn node_args(&self, id: hir::HirId) -> GenericArgsRef<'tcx> {
validate_hir_id_for_typeck_results(self.hir_owner, id);
- self.node_substs.get(&id.local_id).cloned().unwrap_or_else(|| InternalSubsts::empty())
+ self.node_args.get(&id.local_id).cloned().unwrap_or_else(|| GenericArgs::empty())
}
- pub fn node_substs_opt(&self, id: hir::HirId) -> Option<SubstsRef<'tcx>> {
+ pub fn node_args_opt(&self, id: hir::HirId) -> Option<GenericArgsRef<'tcx>> {
validate_hir_id_for_typeck_results(self.hir_owner, id);
- self.node_substs.get(&id.local_id).cloned()
+ self.node_args.get(&id.local_id).cloned()
}
/// Returns the type of a pattern as a monotype. Like [`expr_ty`], this function
@@ -636,7 +635,7 @@ impl<'a, V> LocalTableInContextMut<'a, V> {
&mut self,
items: UnordItems<(hir::HirId, V), impl Iterator<Item = (hir::HirId, V)>>,
) {
- self.data.extend(items.map(|(id, value)| {
+ self.data.extend_unord(items.map(|(id, value)| {
validate_hir_id_for_typeck_results(self.hir_owner, id);
(id.local_id, value)
}))
@@ -671,12 +670,12 @@ impl<'tcx> CanonicalUserType<'tcx> {
pub fn is_identity(&self) -> bool {
match self.value {
UserType::Ty(_) => false,
- UserType::TypeOf(_, user_substs) => {
- if user_substs.user_self_ty.is_some() {
+ UserType::TypeOf(_, user_args) => {
+ if user_args.user_self_ty.is_some() {
return false;
}
- iter::zip(user_substs.substs, BoundVar::new(0)..).all(|(kind, cvar)| {
+ iter::zip(user_args.args, BoundVar::new(0)..).all(|(kind, cvar)| {
match kind.unpack() {
GenericArgKind::Type(ty) => match ty.kind() {
ty::Bound(debruijn, b) => {
@@ -721,5 +720,5 @@ pub enum UserType<'tcx> {
/// The canonical type is the result of `type_of(def_id)` with the
/// given substitutions applied.
- TypeOf(DefId, UserSubsts<'tcx>),
+ TypeOf(DefId, UserArgs<'tcx>),
}
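
Downstream users of `TypeckResults` pick up the same rename on the per-node accessors, with the empty fallback now coming from `GenericArgs` instead of `InternalSubsts`. A short sketch of an updated caller (rustc-internal APIs; `hir_id` stands for whatever node id the caller already has):

let results = tcx.typeck(owner_def_id);
// Before: results.node_substs(hir_id) / results.node_substs_opt(hir_id)
let args: GenericArgsRef<'_> = results.node_args(hir_id);
let args_opt = results.node_args_opt(hir_id);
// When nothing was recorded for `hir_id`, `node_args` falls back to
// `GenericArgs::empty()` (formerly `InternalSubsts::empty()`).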
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index e2e4a2dbd..564f982f8 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -7,7 +7,7 @@ use crate::ty::{
self, FallibleTypeFolder, ToPredicate, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
TypeVisitableExt,
};
-use crate::ty::{GenericArgKind, SubstsRef};
+use crate::ty::{GenericArgKind, GenericArgsRef};
use rustc_apfloat::Float as _;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
@@ -19,7 +19,7 @@ use rustc_index::bit_set::GrowableBitSet;
use rustc_macros::HashStable;
use rustc_session::Limit;
use rustc_span::sym;
-use rustc_target::abi::{Integer, IntegerType, Size, TargetDataLayout};
+use rustc_target::abi::{Integer, IntegerType, Size};
use rustc_target::spec::abi::Abi;
use smallvec::SmallVec;
use std::{fmt, iter};
@@ -57,7 +57,7 @@ impl<'tcx> fmt::Display for Discr<'tcx> {
let x = self.val;
// sign extend the raw representation to be an i128
let x = size.sign_extend(x) as i128;
- write!(fmt, "{}", x)
+ write!(fmt, "{x}")
}
_ => write!(fmt, "{}", self.val),
}
@@ -156,7 +156,7 @@ impl<'tcx> TyCtxt<'tcx> {
| DefKind::Enum
| DefKind::Trait
| DefKind::OpaqueTy
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -226,14 +226,14 @@ impl<'tcx> TyCtxt<'tcx> {
return Ty::new_error(self, reported);
}
match *ty.kind() {
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
if !def.is_struct() {
break;
}
match def.non_enum_variant().tail_opt() {
Some(field) => {
f();
- ty = field.ty(self, substs);
+ ty = field.ty(self, args);
}
None => break,
}
@@ -301,12 +301,12 @@ impl<'tcx> TyCtxt<'tcx> {
let (mut a, mut b) = (source, target);
loop {
match (&a.kind(), &b.kind()) {
- (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs))
+ (&ty::Adt(a_def, a_args), &ty::Adt(b_def, b_args))
if a_def == b_def && a_def.is_struct() =>
{
if let Some(f) = a_def.non_enum_variant().tail_opt() {
- a = f.ty(self, a_substs);
- b = f.ty(self, b_substs);
+ a = f.ty(self, a_args);
+ b = f.ty(self, b_args);
} else {
break;
}
@@ -349,7 +349,7 @@ impl<'tcx> TyCtxt<'tcx> {
let drop_trait = self.lang_items().drop_trait()?;
self.ensure().coherent_trait(drop_trait);
- let ty = self.type_of(adt_did).subst_identity();
+ let ty = self.type_of(adt_did).instantiate_identity();
let mut dtor_candidate = None;
self.for_each_relevant_impl(drop_trait, ty, |impl_did| {
if validate(self, impl_did).is_err() {
@@ -358,7 +358,8 @@ impl<'tcx> TyCtxt<'tcx> {
}
let Some(item_id) = self.associated_item_def_ids(impl_did).first() else {
- self.sess.delay_span_bug(self.def_span(impl_did), "Drop impl without drop function");
+ self.sess
+ .delay_span_bug(self.def_span(impl_did), "Drop impl without drop function");
return;
};
@@ -383,7 +384,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Note that this returns only the constraints for the
/// destructor of `def` itself. For the destructors of the
/// contents, you need `adt_dtorck_constraint`.
- pub fn destructor_constraints(self, def: ty::AdtDef<'tcx>) -> Vec<ty::subst::GenericArg<'tcx>> {
+ pub fn destructor_constraints(self, def: ty::AdtDef<'tcx>) -> Vec<ty::GenericArg<'tcx>> {
let dtor = match def.destructor(self) {
None => {
debug!("destructor_constraints({:?}) - no dtor", def.did());
@@ -400,7 +401,7 @@ impl<'tcx> TyCtxt<'tcx> {
// must be live.
// We need to return the list of parameters from the ADTs
- // generics/substs that correspond to impure parameters on the
+ // generics/args that correspond to impure parameters on the
// impl's generics. This is a bit ugly, but conceptually simple:
//
// Suppose our ADT looks like the following
@@ -412,21 +413,21 @@ impl<'tcx> TyCtxt<'tcx> {
// impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
//
// We want to return the parameters (X, Y). For that, we match
- // up the item-substs <X, Y, Z> with the substs on the impl ADT,
- // <P1, P2, P0>, and then look up which of the impl substs refer to
+ // up the item-args <X, Y, Z> with the args on the impl ADT,
+ // <P1, P2, P0>, and then look up which of the impl args refer to
// parameters marked as pure.
- let impl_substs = match *self.type_of(impl_def_id).subst_identity().kind() {
- ty::Adt(def_, substs) if def_ == def => substs,
+ let impl_args = match *self.type_of(impl_def_id).instantiate_identity().kind() {
+ ty::Adt(def_, args) if def_ == def => args,
_ => bug!(),
};
- let item_substs = match *self.type_of(def.did()).subst_identity().kind() {
- ty::Adt(def_, substs) if def_ == def => substs,
+ let item_args = match *self.type_of(def.did()).instantiate_identity().kind() {
+ ty::Adt(def_, args) if def_ == def => args,
_ => bug!(),
};
- let result = iter::zip(item_substs, impl_substs)
+ let result = iter::zip(item_args, impl_args)
.filter(|&(_, k)| {
match k.unpack() {
GenericArgKind::Lifetime(region) => match region.kind() {
@@ -459,12 +460,12 @@ impl<'tcx> TyCtxt<'tcx> {
/// Checks whether each generic argument is simply a unique generic parameter.
pub fn uses_unique_generic_params(
self,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
ignore_regions: CheckRegions,
) -> Result<(), NotUniqueParam<'tcx>> {
let mut seen = GrowableBitSet::default();
let mut seen_late = FxHashSet::default();
- for arg in substs {
+ for arg in args {
match arg.unpack() {
GenericArgKind::Lifetime(lt) => match (ignore_regions, lt.kind()) {
(CheckRegions::Bound, ty::ReLateBound(di, reg)) => {
@@ -510,10 +511,10 @@ impl<'tcx> TyCtxt<'tcx> {
/// for better caching.
pub fn uses_unique_placeholders_ignoring_regions(
self,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Result<(), NotUniqueParam<'tcx>> {
let mut seen = GrowableBitSet::default();
- for arg in substs {
+ for arg in args {
match arg.unpack() {
// Ignore regions, since we can't resolve those in a canonicalized
// query in the trait solver.
@@ -594,7 +595,7 @@ impl<'tcx> TyCtxt<'tcx> {
def_id
}
- /// Given the `DefId` and substs a closure, creates the type of
+ /// Given the `DefId` and args of a closure, creates the type of
/// `self` argument that the closure expects. For example, for a
/// `Fn` closure, this would return a reference type `&T` where
/// `T = closure_ty`.
@@ -607,11 +608,11 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn closure_env_ty(
self,
closure_def_id: DefId,
- closure_substs: SubstsRef<'tcx>,
+ closure_args: GenericArgsRef<'tcx>,
env_region: ty::Region<'tcx>,
) -> Option<Ty<'tcx>> {
- let closure_ty = Ty::new_closure(self, closure_def_id, closure_substs);
- let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ let closure_ty = Ty::new_closure(self, closure_def_id, closure_args);
+ let closure_kind_ty = closure_args.as_closure().kind_ty();
let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
let env_ty = match closure_kind {
ty::ClosureKind::Fn => Ty::new_imm_ref(self, env_region, closure_ty),
@@ -654,7 +655,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Returns the type a reference to the thread local takes in MIR.
pub fn thread_local_ptr_ty(self, def_id: DefId) -> Ty<'tcx> {
- let static_ty = self.type_of(def_id).subst_identity();
+ let static_ty = self.type_of(def_id).instantiate_identity();
if self.is_mutable_static(def_id) {
Ty::new_mut_ptr(self, static_ty)
} else if self.is_foreign_item(def_id) {
@@ -670,7 +671,7 @@ impl<'tcx> TyCtxt<'tcx> {
// Make sure that any constants in the static's type are evaluated.
let static_ty = self.normalize_erasing_regions(
ty::ParamEnv::empty(),
- self.type_of(def_id).subst_identity(),
+ self.type_of(def_id).instantiate_identity(),
);
// Make sure that accesses to unsafe statics end up using raw pointers.
@@ -719,7 +720,7 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn try_expand_impl_trait_type(
self,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Result<Ty<'tcx>, Ty<'tcx>> {
let mut visitor = OpaqueTypeExpander {
seen_opaque_tys: FxHashSet::default(),
@@ -732,7 +733,7 @@ impl<'tcx> TyCtxt<'tcx> {
tcx: self,
};
- let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
+ let expanded_type = visitor.expand_opaque_ty(def_id, args).unwrap();
if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
}
@@ -799,7 +800,7 @@ struct OpaqueTypeExpander<'tcx> {
seen_opaque_tys: FxHashSet<DefId>,
// Cache of all expansions we've seen so far. This is a critical
// optimization for some large types produced by async fn trees.
- expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
+ expanded_cache: FxHashMap<(DefId, GenericArgsRef<'tcx>), Ty<'tcx>>,
primary_def_id: Option<DefId>,
found_recursion: bool,
found_any_recursion: bool,
@@ -812,19 +813,19 @@ struct OpaqueTypeExpander<'tcx> {
}
impl<'tcx> OpaqueTypeExpander<'tcx> {
- fn expand_opaque_ty(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
+ fn expand_opaque_ty(&mut self, def_id: DefId, args: GenericArgsRef<'tcx>) -> Option<Ty<'tcx>> {
if self.found_any_recursion {
return None;
}
- let substs = substs.fold_with(self);
+ let args = args.fold_with(self);
if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
- let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
+ let expanded_ty = match self.expanded_cache.get(&(def_id, args)) {
Some(expanded_ty) => *expanded_ty,
None => {
let generic_ty = self.tcx.type_of(def_id);
- let concrete_ty = generic_ty.subst(self.tcx, substs);
+ let concrete_ty = generic_ty.instantiate(self.tcx, args);
let expanded_ty = self.fold_ty(concrete_ty);
- self.expanded_cache.insert((def_id, substs), expanded_ty);
+ self.expanded_cache.insert((def_id, args), expanded_ty);
expanded_ty
}
};
@@ -841,21 +842,21 @@ impl<'tcx> OpaqueTypeExpander<'tcx> {
}
}
- fn expand_generator(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
+ fn expand_generator(&mut self, def_id: DefId, args: GenericArgsRef<'tcx>) -> Option<Ty<'tcx>> {
if self.found_any_recursion {
return None;
}
- let substs = substs.fold_with(self);
+ let args = args.fold_with(self);
if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
- let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
+ let expanded_ty = match self.expanded_cache.get(&(def_id, args)) {
Some(expanded_ty) => *expanded_ty,
None => {
for bty in self.tcx.generator_hidden_types(def_id) {
- let hidden_ty = bty.subst(self.tcx, substs);
+ let hidden_ty = bty.instantiate(self.tcx, args);
self.fold_ty(hidden_ty);
}
- let expanded_ty = Ty::new_generator_witness_mir(self.tcx, def_id, substs);
- self.expanded_cache.insert((def_id, substs), expanded_ty);
+ let expanded_ty = Ty::new_generator_witness_mir(self.tcx, def_id, args);
+ self.expanded_cache.insert((def_id, args), expanded_ty);
expanded_ty
}
};
@@ -879,16 +880,16 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for OpaqueTypeExpander<'tcx> {
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- let mut t = if let ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) = *t.kind() {
- self.expand_opaque_ty(def_id, substs).unwrap_or(t)
+ let mut t = if let ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) = *t.kind() {
+ self.expand_opaque_ty(def_id, args).unwrap_or(t)
} else if t.has_opaque_types() || t.has_generators() {
t.super_fold_with(self)
} else {
t
};
if self.expand_generators {
- if let ty::GeneratorWitnessMIR(def_id, substs) = *t.kind() {
- t = self.expand_generator(def_id, substs).unwrap_or(t);
+ if let ty::GeneratorWitnessMIR(def_id, args) = *t.kind() {
+ t = self.expand_generator(def_id, args).unwrap_or(t);
}
}
t
@@ -1084,7 +1085,7 @@ impl<'tcx> Ty<'tcx> {
#[inline]
pub fn needs_drop(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
// Avoid querying in simple cases.
- match needs_drop_components(self, &tcx.data_layout) {
+ match needs_drop_components(tcx, self) {
Err(AlwaysRequiresDrop) => true,
Ok(components) => {
let query_ty = match *components {
@@ -1117,7 +1118,7 @@ impl<'tcx> Ty<'tcx> {
#[inline]
pub fn has_significant_drop(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
// Avoid querying in simple cases.
- match needs_drop_components(self, &tcx.data_layout) {
+ match needs_drop_components(tcx, self) {
Err(AlwaysRequiresDrop) => true,
Ok(components) => {
let query_ty = match *components {
@@ -1277,10 +1278,10 @@ impl<'tcx> ExplicitSelf<'tcx> {
/// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
/// this type always needs drop.
pub fn needs_drop_components<'tcx>(
+ tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
- target_layout: &TargetDataLayout,
) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
- match ty.kind() {
+ match *ty.kind() {
ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_))
| ty::Bool
@@ -1302,11 +1303,11 @@ pub fn needs_drop_components<'tcx>(
ty::Dynamic(..) | ty::Error(_) => Err(AlwaysRequiresDrop),
- ty::Slice(ty) => needs_drop_components(*ty, target_layout),
+ ty::Slice(ty) => needs_drop_components(tcx, ty),
ty::Array(elem_ty, size) => {
- match needs_drop_components(*elem_ty, target_layout) {
+ match needs_drop_components(tcx, elem_ty) {
Ok(v) if v.is_empty() => Ok(v),
- res => match size.try_to_bits(target_layout.pointer_size) {
+ res => match size.try_to_target_usize(tcx) {
// Arrays of size zero don't need drop, even if their element
// type does.
Some(0) => Ok(SmallVec::new()),
@@ -1320,7 +1321,7 @@ pub fn needs_drop_components<'tcx>(
}
// If any field needs drop, then the whole tuple does.
ty::Tuple(fields) => fields.iter().try_fold(SmallVec::new(), move |mut acc, elem| {
- acc.extend(needs_drop_components(elem, target_layout)?);
+ acc.extend(needs_drop_components(tcx, elem)?);
Ok(acc)
}),
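
Besides the rename, `needs_drop_components` changes shape in the hunks above: it takes the `TyCtxt` instead of a `&TargetDataLayout`, and array lengths are resolved with the target-aware `try_to_target_usize(tcx)` rather than `try_to_bits(pointer_size)`. Sketch of the updated call sites (rustc-internal APIs; `size` is the array-length constant from a `ty::Array(elem_ty, size)`):

// Before: needs_drop_components(ty, &tcx.data_layout)
let components = needs_drop_components(tcx, ty); // Result<SmallVec<[Ty<'_>; 2]>, AlwaysRequiresDrop>

// Before: size.try_to_bits(target_layout.pointer_size)
let len: Option<u64> = size.try_to_target_usize(tcx); // None while the length is still generic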
diff --git a/compiler/rustc_middle/src/ty/visit.rs b/compiler/rustc_middle/src/ty/visit.rs
index 520bb55e0..156eda477 100644
--- a/compiler/rustc_middle/src/ty/visit.rs
+++ b/compiler/rustc_middle/src/ty/visit.rs
@@ -88,14 +88,10 @@ pub trait TypeVisitableExt<'tcx>: TypeVisitable<TyCtxt<'tcx>> {
self.has_type_flags(TypeFlags::HAS_INFER)
}
fn has_placeholders(&self) -> bool {
- self.has_type_flags(
- TypeFlags::HAS_RE_PLACEHOLDER
- | TypeFlags::HAS_TY_PLACEHOLDER
- | TypeFlags::HAS_CT_PLACEHOLDER,
- )
+ self.has_type_flags(TypeFlags::HAS_PLACEHOLDER)
}
fn has_non_region_placeholders(&self) -> bool {
- self.has_type_flags(TypeFlags::HAS_TY_PLACEHOLDER | TypeFlags::HAS_CT_PLACEHOLDER)
+ self.has_type_flags(TypeFlags::HAS_PLACEHOLDER - TypeFlags::HAS_RE_PLACEHOLDER)
}
fn has_param(&self) -> bool {
self.has_type_flags(TypeFlags::HAS_PARAM)
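
The new `has_non_region_placeholders` body leans on flag subtraction: for bitflags-style sets, `a - b` is the set difference, so `HAS_PLACEHOLDER - HAS_RE_PLACEHOLDER` is exactly the type- and const-placeholder bits. A standalone sketch of that operator using the `bitflags` crate (2.x syntax; the flag names below are stand-ins, not the real `TypeFlags`):

use bitflags::bitflags;

bitflags! {
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct Flags: u8 {
        const RE_PLACEHOLDER = 1 << 0;
        const TY_PLACEHOLDER = 1 << 1;
        const CT_PLACEHOLDER = 1 << 2;
        // Composite flag, playing the role of TypeFlags::HAS_PLACEHOLDER above.
        const PLACEHOLDER = Self::RE_PLACEHOLDER.bits()
            | Self::TY_PLACEHOLDER.bits()
            | Self::CT_PLACEHOLDER.bits();
    }
}

fn main() {
    // `-` removes the region bit and keeps the rest.
    let non_region = Flags::PLACEHOLDER - Flags::RE_PLACEHOLDER;
    assert_eq!(non_region, Flags::TY_PLACEHOLDER | Flags::CT_PLACEHOLDER);
}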
diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs
index 443791d0a..97402caa0 100644
--- a/compiler/rustc_middle/src/ty/vtable.rs
+++ b/compiler/rustc_middle/src/ty/vtable.rs
@@ -29,8 +29,8 @@ impl<'tcx> fmt::Debug for VtblEntry<'tcx> {
VtblEntry::MetadataSize => write!(f, "MetadataSize"),
VtblEntry::MetadataAlign => write!(f, "MetadataAlign"),
VtblEntry::Vacant => write!(f, "Vacant"),
- VtblEntry::Method(instance) => write!(f, "Method({})", instance),
- VtblEntry::TraitVPtr(trait_ref) => write!(f, "TraitVPtr({})", trait_ref),
+ VtblEntry::Method(instance) => write!(f, "Method({instance})"),
+ VtblEntry::TraitVPtr(trait_ref) => write!(f, "TraitVPtr({trait_ref})"),
}
}
}
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs
index 04a635a68..7c3d9ed39 100644
--- a/compiler/rustc_middle/src/ty/walk.rs
+++ b/compiler/rustc_middle/src/ty/walk.rs
@@ -1,8 +1,8 @@
//! An iterator over the type substructure.
//! WARNING: this does not keep track of the region depth.
-use crate::ty::subst::{GenericArg, GenericArgKind};
use crate::ty::{self, Ty};
+use crate::ty::{GenericArg, GenericArgKind};
use rustc_data_structures::sso::SsoHashSet;
use smallvec::SmallVec;
@@ -166,33 +166,33 @@ fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>)
stack.push(lt.into());
}
ty::Alias(_, data) => {
- stack.extend(data.substs.iter().rev());
+ stack.extend(data.args.iter().rev());
}
ty::Dynamic(obj, lt, _) => {
stack.push(lt.into());
stack.extend(obj.iter().rev().flat_map(|predicate| {
- let (substs, opt_ty) = match predicate.skip_binder() {
- ty::ExistentialPredicate::Trait(tr) => (tr.substs, None),
- ty::ExistentialPredicate::Projection(p) => (p.substs, Some(p.term)),
+ let (args, opt_ty) = match predicate.skip_binder() {
+ ty::ExistentialPredicate::Trait(tr) => (tr.args, None),
+ ty::ExistentialPredicate::Projection(p) => (p.args, Some(p.term)),
ty::ExistentialPredicate::AutoTrait(_) =>
// Empty iterator
{
- (ty::InternalSubsts::empty(), None)
+ (ty::GenericArgs::empty(), None)
}
};
- substs.iter().rev().chain(opt_ty.map(|term| match term.unpack() {
+ args.iter().rev().chain(opt_ty.map(|term| match term.unpack() {
ty::TermKind::Ty(ty) => ty.into(),
ty::TermKind::Const(ct) => ct.into(),
}))
}));
}
- ty::Adt(_, substs)
- | ty::Closure(_, substs)
- | ty::Generator(_, substs, _)
- | ty::GeneratorWitnessMIR(_, substs)
- | ty::FnDef(_, substs) => {
- stack.extend(substs.iter().rev());
+ ty::Adt(_, args)
+ | ty::Closure(_, args)
+ | ty::Generator(_, args, _)
+ | ty::GeneratorWitnessMIR(_, args)
+ | ty::FnDef(_, args) => {
+ stack.extend(args.iter().rev());
}
ty::Tuple(ts) => stack.extend(ts.iter().rev().map(GenericArg::from)),
ty::GeneratorWitness(ts) => {
@@ -233,7 +233,7 @@ fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>)
},
ty::ConstKind::Unevaluated(ct) => {
- stack.extend(ct.substs.iter().rev());
+ stack.extend(ct.args.iter().rev());
}
}
}
diff --git a/compiler/rustc_middle/src/util/bug.rs b/compiler/rustc_middle/src/util/bug.rs
index 43ee0343f..634ed5ec5 100644
--- a/compiler/rustc_middle/src/util/bug.rs
+++ b/compiler/rustc_middle/src/util/bug.rs
@@ -29,7 +29,7 @@ fn opt_span_bug_fmt<S: Into<MultiSpan>>(
location: &Location<'_>,
) -> ! {
tls::with_opt(move |tcx| {
- let msg = format!("{}: {}", location, args);
+ let msg = format!("{location}: {args}");
match (tcx, span) {
(Some(tcx), Some(span)) => tcx.sess.diagnostic().span_bug(span, msg),
(Some(tcx), None) => tcx.sess.diagnostic().bug(msg),
diff --git a/compiler/rustc_middle/src/util/call_kind.rs b/compiler/rustc_middle/src/util/call_kind.rs
index 98d55ea6d..4e2a2c6ae 100644
--- a/compiler/rustc_middle/src/util/call_kind.rs
+++ b/compiler/rustc_middle/src/util/call_kind.rs
@@ -2,7 +2,7 @@
//! as well as errors when attempting to call a non-const function in a const
//! context.
-use crate::ty::subst::SubstsRef;
+use crate::ty::GenericArgsRef;
use crate::ty::{AssocItemContainer, Instance, ParamEnv, Ty, TyCtxt};
use rustc_hir::def_id::DefId;
use rustc_hir::{lang_items, LangItem};
@@ -43,7 +43,7 @@ pub enum CallKind<'tcx> {
self_arg: Option<Ident>,
desugaring: Option<(CallDesugaringKind, Ty<'tcx>)>,
method_did: DefId,
- method_substs: SubstsRef<'tcx>,
+ method_args: GenericArgsRef<'tcx>,
},
/// A call to `Fn(..)::call(..)`, desugared from `my_closure(a, b, c)`
FnCall { fn_trait_id: DefId, self_ty: Ty<'tcx> },
@@ -63,7 +63,7 @@ pub fn call_kind<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
method_did: DefId,
- method_substs: SubstsRef<'tcx>,
+ method_args: GenericArgsRef<'tcx>,
fn_call_span: Span,
from_hir_call: bool,
self_arg: Option<Ident>,
@@ -92,19 +92,19 @@ pub fn call_kind<'tcx>(
// an FnOnce call, an operator (e.g. `<<`), or a
// deref coercion.
let kind = if let Some(trait_id) = fn_call {
- Some(CallKind::FnCall { fn_trait_id: trait_id, self_ty: method_substs.type_at(0) })
+ Some(CallKind::FnCall { fn_trait_id: trait_id, self_ty: method_args.type_at(0) })
} else if let Some(trait_id) = operator {
- Some(CallKind::Operator { self_arg, trait_id, self_ty: method_substs.type_at(0) })
+ Some(CallKind::Operator { self_arg, trait_id, self_ty: method_args.type_at(0) })
} else if is_deref {
let deref_target = tcx.get_diagnostic_item(sym::deref_target).and_then(|deref_target| {
- Instance::resolve(tcx, param_env, deref_target, method_substs).transpose()
+ Instance::resolve(tcx, param_env, deref_target, method_args).transpose()
});
if let Some(Ok(instance)) = deref_target {
let deref_target_ty = instance.ty(tcx, param_env);
Some(CallKind::DerefCoercion {
deref_target: tcx.def_span(instance.def_id()),
deref_target_ty,
- self_ty: method_substs.type_at(0),
+ self_ty: method_args.type_at(0),
})
} else {
None
@@ -119,24 +119,24 @@ pub fn call_kind<'tcx>(
let desugaring = if Some(method_did) == tcx.lang_items().into_iter_fn()
&& fn_call_span.desugaring_kind() == Some(DesugaringKind::ForLoop)
{
- Some((CallDesugaringKind::ForLoopIntoIter, method_substs.type_at(0)))
+ Some((CallDesugaringKind::ForLoopIntoIter, method_args.type_at(0)))
} else if fn_call_span.desugaring_kind() == Some(DesugaringKind::QuestionMark) {
if Some(method_did) == tcx.lang_items().branch_fn() {
- Some((CallDesugaringKind::QuestionBranch, method_substs.type_at(0)))
+ Some((CallDesugaringKind::QuestionBranch, method_args.type_at(0)))
} else if Some(method_did) == tcx.lang_items().from_residual_fn() {
- Some((CallDesugaringKind::QuestionFromResidual, method_substs.type_at(0)))
+ Some((CallDesugaringKind::QuestionFromResidual, method_args.type_at(0)))
} else {
None
}
} else if Some(method_did) == tcx.lang_items().from_output_fn()
&& fn_call_span.desugaring_kind() == Some(DesugaringKind::TryBlock)
{
- Some((CallDesugaringKind::TryBlockFromOutput, method_substs.type_at(0)))
+ Some((CallDesugaringKind::TryBlockFromOutput, method_args.type_at(0)))
} else if fn_call_span.is_desugaring(DesugaringKind::Await) {
- Some((CallDesugaringKind::Await, method_substs.type_at(0)))
+ Some((CallDesugaringKind::Await, method_args.type_at(0)))
} else {
None
};
- CallKind::Normal { self_arg, desugaring, method_did, method_substs }
+ CallKind::Normal { self_arg, desugaring, method_did, method_args }
})
}
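
Consumers that destructure `CallKind` pick up the renamed field as well. Sketch of a match over the variants shown above (rustc-internal API; bodies elided):

match kind {
    // Before: CallKind::Normal { self_arg, desugaring, method_did, method_substs }
    CallKind::Normal { self_arg, desugaring, method_did, method_args } => { /* ... */ }
    CallKind::FnCall { fn_trait_id, self_ty } => { /* ... */ }
    CallKind::Operator { self_arg, trait_id, self_ty } => { /* ... */ }
    CallKind::DerefCoercion { deref_target, deref_target_ty, self_ty } => { /* ... */ }
}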
diff --git a/compiler/rustc_middle/src/util/common.rs b/compiler/rustc_middle/src/util/common.rs
index 08977049d..df101a2f6 100644
--- a/compiler/rustc_middle/src/util/common.rs
+++ b/compiler/rustc_middle/src/util/common.rs
@@ -17,7 +17,7 @@ pub fn to_readable_str(mut val: usize) -> String {
groups.push(group.to_string());
break;
} else {
- groups.push(format!("{:03}", group));
+ groups.push(format!("{group:03}"));
}
}
diff --git a/compiler/rustc_middle/src/util/find_self_call.rs b/compiler/rustc_middle/src/util/find_self_call.rs
index 0eab0adf0..1b845334c 100644
--- a/compiler/rustc_middle/src/util/find_self_call.rs
+++ b/compiler/rustc_middle/src/util/find_self_call.rs
@@ -1,5 +1,5 @@
use crate::mir::*;
-use crate::ty::subst::SubstsRef;
+use crate::ty::GenericArgsRef;
use crate::ty::{self, TyCtxt};
use rustc_span::def_id::DefId;
@@ -11,21 +11,21 @@ pub fn find_self_call<'tcx>(
body: &Body<'tcx>,
local: Local,
block: BasicBlock,
-) -> Option<(DefId, SubstsRef<'tcx>)> {
+) -> Option<(DefId, GenericArgsRef<'tcx>)> {
debug!("find_self_call(local={:?}): terminator={:?}", local, &body[block].terminator);
if let Some(Terminator { kind: TerminatorKind::Call { func, args, .. }, .. }) =
&body[block].terminator
{
debug!("find_self_call: func={:?}", func);
if let Operand::Constant(box Constant { literal, .. }) = func {
- if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ if let ty::FnDef(def_id, fn_args) = *literal.ty().kind() {
if let Some(ty::AssocItem { fn_has_self_parameter: true, .. }) =
tcx.opt_associated_item(def_id)
{
- debug!("find_self_call: args={:?}", args);
+ debug!("find_self_call: args={:?}", fn_args);
if let [Operand::Move(self_place) | Operand::Copy(self_place), ..] = **args {
if self_place.as_local() == Some(local) {
- return Some((def_id, substs));
+ return Some((def_id, fn_args));
}
}
}
diff --git a/compiler/rustc_middle/src/values.rs b/compiler/rustc_middle/src/values.rs
index b0961d917..384a36843 100644
--- a/compiler/rustc_middle/src/values.rs
+++ b/compiler/rustc_middle/src/values.rs
@@ -209,7 +209,7 @@ fn find_item_ty_spans(
match ty.kind {
hir::TyKind::Path(hir::QPath::Resolved(_, path)) => {
if let Res::Def(kind, def_id) = path.res
- && kind != DefKind::TyAlias {
+ && !matches!(kind, DefKind::TyAlias { .. }) {
let check_params = def_id.as_local().map_or(true, |def_id| {
if def_id == needle {
spans.push(ty.span);
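
The `DefKind::TyAlias { .. }` patterns throughout this diff reflect that the variant now carries fields, so plain `==`/`!=` comparisons against the bare variant name no longer compile and call sites switch to `matches!`. A standalone illustration with a toy enum (the names and the field are made-up stand-ins):

#[derive(Debug)]
enum Kind {
    Struct,
    // A variant that gained a field, like `DefKind::TyAlias { .. }` here.
    TyAlias { lazy: bool },
}

fn is_not_alias(kind: &Kind) -> bool {
    // `kind != Kind::TyAlias` cannot be written once the variant has fields;
    // `matches!` with `{ .. }` accepts any field values instead.
    !matches!(kind, Kind::TyAlias { .. })
}

fn main() {
    assert!(is_not_alias(&Kind::Struct));
    assert!(!is_not_alias(&Kind::TyAlias { lazy: true }));
}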
diff --git a/compiler/rustc_mir_build/Cargo.toml b/compiler/rustc_mir_build/Cargo.toml
index 58449ee9e..c7e2c625c 100644
--- a/compiler/rustc_mir_build/Cargo.toml
+++ b/compiler/rustc_mir_build/Cargo.toml
@@ -10,7 +10,7 @@ rustc_arena = { path = "../rustc_arena" }
tracing = "0.1"
either = "1"
rustc_middle = { path = "../rustc_middle" }
-rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_apfloat = "0.2.0"
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_index = { path = "../rustc_index" }
rustc_errors = { path = "../rustc_errors" }
diff --git a/compiler/rustc_mir_build/messages.ftl b/compiler/rustc_mir_build/messages.ftl
index 98156840d..938f3edd3 100644
--- a/compiler/rustc_mir_build/messages.ftl
+++ b/compiler/rustc_mir_build/messages.ftl
@@ -312,6 +312,8 @@ mir_build_unreachable_pattern = unreachable pattern
.label = unreachable pattern
.catchall_label = matches any value
+mir_build_unsafe_not_inherited = items do not inherit unsafety from separate enclosing items
+
mir_build_unsafe_op_in_unsafe_fn_borrow_of_layout_constrained_field_requires_unsafe =
borrow of layout constrained field with interior mutability is unsafe and requires unsafe block (error E0133)
.note = references to fields of layout constrained fields lose the constraints. Coupled with interior mutability, the field can be changed to invalid values
diff --git a/compiler/rustc_mir_build/src/build/custom/parse.rs b/compiler/rustc_mir_build/src/build/custom/parse.rs
index 803207d9d..60c4a0416 100644
--- a/compiler/rustc_mir_build/src/build/custom/parse.rs
+++ b/compiler/rustc_mir_build/src/build/custom/parse.rs
@@ -74,7 +74,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
kind @ StmtKind::Let { pattern, .. } => {
return Err(ParseError {
span: pattern.span,
- item_description: format!("{:?}", kind),
+ item_description: format!("{kind:?}"),
expected: "expression".to_string(),
});
}
@@ -241,9 +241,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
});
}
- let Some(trailing) = block.expr else {
- return Err(self.expr_error(expr_id, "terminator"))
- };
+ let Some(trailing) = block.expr else { return Err(self.expr_error(expr_id, "terminator")) };
let span = self.thir[trailing].span;
let terminator = self.parse_terminator(trailing)?;
data.terminator = Some(Terminator {
diff --git a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
index 4cb9d7bab..26662f5de 100644
--- a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
+++ b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
@@ -61,11 +61,9 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
})
},
@call("mir_call", args) => {
- let destination = self.parse_place(args[0])?;
- let target = self.parse_block(args[1])?;
- self.parse_call(args[2], destination, target)
+ self.parse_call(args)
},
- ExprKind::Match { scrutinee, arms } => {
+ ExprKind::Match { scrutinee, arms, .. } => {
let discr = self.parse_operand(*scrutinee)?;
self.parse_match(arms, expr.span).map(|t| TerminatorKind::SwitchInt { discr, targets: t })
},
@@ -78,7 +76,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
span,
item_description: "no arms".to_string(),
expected: "at least one arm".to_string(),
- })
+ });
};
let otherwise = &self.thir[*otherwise];
@@ -87,7 +85,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
span: otherwise.span,
item_description: format!("{:?}", otherwise.pattern.kind),
expected: "wildcard pattern".to_string(),
- })
+ });
};
let otherwise = self.parse_block(otherwise.body)?;
@@ -100,7 +98,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
span: arm.pattern.span,
item_description: format!("{:?}", arm.pattern.kind),
expected: "constant pattern".to_string(),
- })
+ });
};
values.push(value.eval_bits(self.tcx, self.param_env, arm.pattern.ty));
targets.push(self.parse_block(arm.body)?);
@@ -109,13 +107,14 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
Ok(SwitchTargets::new(values.into_iter().zip(targets), otherwise))
}
- fn parse_call(
- &self,
- expr_id: ExprId,
- destination: Place<'tcx>,
- target: BasicBlock,
- ) -> PResult<TerminatorKind<'tcx>> {
- parse_by_kind!(self, expr_id, _, "function call",
+ fn parse_call(&self, args: &[ExprId]) -> PResult<TerminatorKind<'tcx>> {
+ let (destination, call) = parse_by_kind!(self, args[0], _, "function call",
+ ExprKind::Assign { lhs, rhs } => (*lhs, *rhs),
+ );
+ let destination = self.parse_place(destination)?;
+ let target = self.parse_block(args[1])?;
+
+ parse_by_kind!(self, call, _, "function call",
ExprKind::Call { fun, args, from_hir_call, fn_span, .. } => {
let fun = self.parse_operand(*fun)?;
let args = args
@@ -192,12 +191,12 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
fields.iter().map(|e| self.parse_operand(*e)).collect::<Result<_, _>>()?
))
},
- ExprKind::Adt(box AdtExpr{ adt_def, variant_index, substs, fields, .. }) => {
+ ExprKind::Adt(box AdtExpr{ adt_def, variant_index, args, fields, .. }) => {
let is_union = adt_def.is_union();
let active_field_index = is_union.then(|| fields[0].name);
Ok(Rvalue::Aggregate(
- Box::new(AggregateKind::Adt(adt_def.did(), *variant_index, substs, None, active_field_index)),
+ Box::new(AggregateKind::Adt(adt_def.did(), *variant_index, args, None, active_field_index)),
fields.iter().map(|f| self.parse_operand(f.expr)).collect::<Result<_, _>>()?
))
},
diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
index 3fe751ae0..aaa37446e 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_constant.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
@@ -75,10 +75,10 @@ pub fn as_constant_inner<'tcx>(
Constant { span, user_ty, literal }
}
- ExprKind::NamedConst { def_id, substs, ref user_ty } => {
+ ExprKind::NamedConst { def_id, args, ref user_ty } => {
let user_ty = user_ty.as_ref().and_then(push_cuta);
- let uneval = mir::UnevaluatedConst::new(def_id, substs);
+ let uneval = mir::UnevaluatedConst::new(def_id, args);
let literal = ConstantKind::Unevaluated(uneval, ty);
Constant { user_ty, span, literal }
@@ -89,8 +89,8 @@ pub fn as_constant_inner<'tcx>(
Constant { user_ty: None, span, literal }
}
- ExprKind::ConstBlock { did: def_id, substs } => {
- let uneval = mir::UnevaluatedConst::new(def_id, substs);
+ ExprKind::ConstBlock { did: def_id, args } => {
+ let uneval = mir::UnevaluatedConst::new(def_id, args);
let literal = ConstantKind::Unevaluated(uneval, ty);
Constant { user_ty: None, span, literal }
diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs
index 60acd279f..2e7ef265a 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_place.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs
@@ -175,11 +175,8 @@ fn to_upvars_resolved_place_builder<'tcx>(
projection: &[PlaceElem<'tcx>],
) -> Option<PlaceBuilder<'tcx>> {
let Some((capture_index, capture)) =
- find_capture_matching_projections(
- &cx.upvars,
- var_hir_id,
- &projection,
- ) else {
+ find_capture_matching_projections(&cx.upvars, var_hir_id, &projection)
+ else {
let closure_span = cx.tcx.def_span(closure_def_id);
if !enable_precise_capture(closure_span) {
bug!(
@@ -189,10 +186,7 @@ fn to_upvars_resolved_place_builder<'tcx>(
projection
)
} else {
- debug!(
- "No associated capture found for {:?}[{:#?}]",
- var_hir_id, projection,
- );
+ debug!("No associated capture found for {:?}[{:#?}]", var_hir_id, projection,);
}
return None;
};
@@ -242,6 +236,9 @@ fn strip_prefix<'a, 'tcx>(
}
assert_matches!(iter.next(), Some(ProjectionElem::Field(..)));
}
+ HirProjectionKind::OpaqueCast => {
+ assert_matches!(iter.next(), Some(ProjectionElem::OpaqueCast(..)));
+ }
HirProjectionKind::Index | HirProjectionKind::Subslice => {
bug!("unexpected projection kind: {:?}", projection);
}
@@ -736,5 +733,5 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Precise capture is enabled if user is using Rust Edition 2021 or higher.
fn enable_precise_capture(closure_span: Span) -> bool {
- closure_span.rust_2021()
+ closure_span.at_least_rust_2021()
}
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index 32ffb990b..3220a184d 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -16,7 +16,7 @@ use rustc_middle::mir::*;
use rustc_middle::thir::*;
use rustc_middle::ty::cast::{mir_cast_kind, CastTy};
use rustc_middle::ty::layout::IntegerExt;
-use rustc_middle::ty::{self, Ty, UpvarSubsts};
+use rustc_middle::ty::{self, Ty, UpvarArgs};
use rustc_span::Span;
impl<'a, 'tcx> Builder<'a, 'tcx> {
@@ -382,7 +382,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
ExprKind::Closure(box ClosureExpr {
closure_id,
- substs,
+ args,
ref upvars,
movability,
ref fake_reads,
@@ -470,19 +470,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
})
.collect();
- let result = match substs {
- UpvarSubsts::Generator(substs) => {
+ let result = match args {
+ UpvarArgs::Generator(args) => {
// We implicitly set the discriminant to 0. See
// librustc_mir/transform/deaggregator.rs for details.
let movability = movability.unwrap();
- Box::new(AggregateKind::Generator(
- closure_id.to_def_id(),
- substs,
- movability,
- ))
+ Box::new(AggregateKind::Generator(closure_id.to_def_id(), args, movability))
}
- UpvarSubsts::Closure(substs) => {
- Box::new(AggregateKind::Closure(closure_id.to_def_id(), substs))
+ UpvarArgs::Closure(args) => {
+ Box::new(AggregateKind::Closure(closure_id.to_def_id(), args))
}
};
block.and(Rvalue::Aggregate(result, operands))
@@ -778,8 +774,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Not in a closure
debug_assert!(
local == ty::CAPTURE_STRUCT_LOCAL,
- "Expected local to be Local(1), found {:?}",
- local
+ "Expected local to be Local(1), found {local:?}"
);
// Not in a closure
debug_assert!(
diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs
index e30fdcbbe..a5c86e31a 100644
--- a/compiler/rustc_mir_build/src/build/expr/into.rs
+++ b/compiler/rustc_mir_build/src/build/expr/into.rs
@@ -47,7 +47,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
ExprKind::Block { block: ast_block } => {
this.ast_block(destination, block, ast_block, source_info)
}
- ExprKind::Match { scrutinee, ref arms } => {
+ ExprKind::Match { scrutinee, ref arms, .. } => {
this.match_expr(destination, expr_span, block, &this.thir[scrutinee], arms)
}
ExprKind::If { cond, then, else_opt, if_then_scope } => {
@@ -317,7 +317,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
ExprKind::Adt(box AdtExpr {
adt_def,
variant_index,
- substs,
+ args,
ref user_ty,
ref fields,
ref base,
@@ -382,7 +382,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let adt = Box::new(AggregateKind::Adt(
adt_def.did(),
variant_index,
- substs,
+ args,
user_ty,
active_field_index,
));
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
index 10770213c..3c4507407 100644
--- a/compiler/rustc_mir_build/src/build/matches/mod.rs
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -607,9 +607,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// };
// ```
if let Some(place) = initializer.try_to_place(self) {
- let LocalInfo::User(BindingForm::Var(
- VarBindingForm { opt_match_place: Some((ref mut match_place, _)), .. },
- )) = **self.local_decls[local].local_info.as_mut().assert_crate_local() else {
+ let LocalInfo::User(BindingForm::Var(VarBindingForm {
+ opt_match_place: Some((ref mut match_place, _)),
+ ..
+ })) = **self.local_decls[local].local_info.as_mut().assert_crate_local()
+ else {
bug!("Let binding to non-user variable.")
};
*match_place = Some(place);
@@ -804,7 +806,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
- PatKind::Variant { adt_def, substs: _, variant_index, ref subpatterns } => {
+ PatKind::Variant { adt_def, args: _, variant_index, ref subpatterns } => {
for subpattern in subpatterns {
let subpattern_user_ty =
pattern_user_ty.clone().variant(adt_def, variant_index, subpattern.field);
@@ -1625,9 +1627,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// at least the first candidate ought to be tested
assert!(
total_candidate_count > candidates.len(),
- "{}, {:#?}",
- total_candidate_count,
- candidates
+ "{total_candidate_count}, {candidates:#?}"
);
debug!("tested_candidates: {}", total_candidate_count - candidates.len());
debug!("untested_candidates: {}", candidates.len());
@@ -2242,7 +2242,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
self.var_debug_info.push(VarDebugInfo {
name,
source_info: debug_source_info,
- references: 0,
value: VarDebugInfoContents::Place(for_arm_body.into()),
argument_index: None,
});
@@ -2262,7 +2261,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
self.var_debug_info.push(VarDebugInfo {
name,
source_info: debug_source_info,
- references: 0,
value: VarDebugInfoContents::Place(ref_for_guard.into()),
argument_index: None,
});
diff --git a/compiler/rustc_mir_build/src/build/matches/simplify.rs b/compiler/rustc_mir_build/src/build/matches/simplify.rs
index f6b1955fd..17ac1f4e0 100644
--- a/compiler/rustc_mir_build/src/build/matches/simplify.rs
+++ b/compiler/rustc_mir_build/src/build/matches/simplify.rs
@@ -259,13 +259,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
- PatKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
+ PatKind::Variant { adt_def, args, variant_index, ref subpatterns } => {
let irrefutable = adt_def.variants().iter_enumerated().all(|(i, v)| {
i == variant_index || {
self.tcx.features().exhaustive_patterns
&& !v
.inhabited_predicate(self.tcx, adt_def)
- .subst(self.tcx, substs)
+ .instantiate(self.tcx, args)
.apply_ignore_module(self.tcx, self.param_env)
}
}) && (adt_def.did().is_local()
diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs
index e6806177d..484e84909 100644
--- a/compiler/rustc_mir_build/src/build/matches/test.rs
+++ b/compiler/rustc_mir_build/src/build/matches/test.rs
@@ -30,7 +30,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// It is a bug to call this with a not-fully-simplified pattern.
pub(super) fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> {
match match_pair.pattern.kind {
- PatKind::Variant { adt_def, substs: _, variant_index: _, subpatterns: _ } => Test {
+ PatKind::Variant { adt_def, args: _, variant_index: _, subpatterns: _ } => Test {
span: match_pair.pattern.span,
kind: TestKind::Switch {
adt_def,
@@ -88,7 +88,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
switch_ty: Ty<'tcx>,
options: &mut FxIndexMap<ConstantKind<'tcx>, u128>,
) -> bool {
- let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place) else {
+ let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place)
+ else {
return false;
};
@@ -126,7 +127,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
candidate: &Candidate<'pat, 'tcx>,
variants: &mut BitSet<VariantIdx>,
) -> bool {
- let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place) else {
+ let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place)
+ else {
return false;
};
@@ -173,16 +175,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
debug_assert_ne!(
target_blocks[idx.index()],
otherwise_block,
- "no candidates for tested discriminant: {:?}",
- discr,
+ "no candidates for tested discriminant: {discr:?}",
);
Some((discr.val, target_blocks[idx.index()]))
} else {
debug_assert_eq!(
target_blocks[idx.index()],
otherwise_block,
- "found candidates for untested discriminant: {:?}",
- discr,
+ "found candidates for untested discriminant: {discr:?}",
);
None
}
@@ -865,7 +865,7 @@ fn trait_method<'tcx>(
tcx: TyCtxt<'tcx>,
trait_def_id: DefId,
method_name: Symbol,
- substs: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
+ args: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
) -> ConstantKind<'tcx> {
// The unhygienic comparison here is acceptable because this is only
// used on known traits.
@@ -875,7 +875,7 @@ fn trait_method<'tcx>(
.find(|item| item.kind == ty::AssocKind::Fn)
.expect("trait method not found");
- let method_ty = Ty::new_fn_def(tcx, item.def_id, substs);
+ let method_ty = Ty::new_fn_def(tcx, item.def_id, args);
ConstantKind::zero_sized(method_ty)
}
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
index d828e71c7..2a23a69b5 100644
--- a/compiler/rustc_mir_build/src/build/mod.rs
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -10,6 +10,7 @@ use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::{GeneratorKind, Node};
+use rustc_index::bit_set::GrowableBitSet;
use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
@@ -93,8 +94,7 @@ fn mir_build(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
|| body.basic_blocks.has_free_regions()
|| body.var_debug_info.has_free_regions()
|| body.yield_ty().has_free_regions()),
- "Unexpected free regions in MIR: {:?}",
- body,
+ "Unexpected free regions in MIR: {body:?}",
);
body
@@ -215,6 +215,14 @@ struct Builder<'a, 'tcx> {
unit_temp: Option<Place<'tcx>>,
var_debug_info: Vec<VarDebugInfo<'tcx>>,
+
+ // A cache for `maybe_lint_level_roots_bounded`. That function is called
+ // repeatedly, and each time it effectively traces a path through a tree
+ // structure from a node towards the root, doing an attribute check on each
+ // node along the way. This cache records which nodes trace all the way to
+ // the root (most of them do) and saves us from retracing many sub-paths
+ // many times, and rechecking many nodes.
+ lint_level_roots_cache: GrowableBitSet<hir::ItemLocalId>,
}
type CaptureMap<'tcx> = SortedIndexMultiMap<usize, hir::HirId, Capture<'tcx>>;
@@ -473,7 +481,7 @@ fn construct_fn<'tcx>(
let (yield_ty, return_ty) = if generator_kind.is_some() {
let gen_ty = arguments[thir::UPVAR_ENV_PARAM].ty;
let gen_sig = match gen_ty.kind() {
- ty::Generator(_, gen_substs, ..) => gen_substs.as_generator().sig(),
+ ty::Generator(_, gen_args, ..) => gen_args.as_generator().sig(),
_ => {
span_bug!(span, "generator w/o generator type: {:?}", gen_ty)
}
@@ -562,7 +570,7 @@ fn construct_const<'a, 'tcx>(
// Figure out what primary body this item has.
let (span, const_ty_span) = match tcx.hir().get(hir_id) {
Node::Item(hir::Item {
- kind: hir::ItemKind::Static(ty, _, _) | hir::ItemKind::Const(ty, _),
+ kind: hir::ItemKind::Static(ty, _, _) | hir::ItemKind::Const(ty, _, _),
span,
..
})
@@ -618,11 +626,9 @@ fn construct_error(tcx: TyCtxt<'_>, def: LocalDefId, err: ErrorGuaranteed) -> Bo
let num_params = match body_owner_kind {
hir::BodyOwnerKind::Fn => tcx.fn_sig(def).skip_binder().inputs().skip_binder().len(),
hir::BodyOwnerKind::Closure => {
- let ty = tcx.type_of(def).subst_identity();
+ let ty = tcx.type_of(def).instantiate_identity();
match ty.kind() {
- ty::Closure(_, substs) => {
- 1 + substs.as_closure().sig().inputs().skip_binder().len()
- }
+ ty::Closure(_, args) => 1 + args.as_closure().sig().inputs().skip_binder().len(),
ty::Generator(..) => 2,
_ => bug!("expected closure or generator, found {ty:?}"),
}
@@ -725,6 +731,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
var_indices: Default::default(),
unit_temp: None,
var_debug_info: vec![],
+ lint_level_roots_cache: GrowableBitSet::new_empty(),
};
assert_eq!(builder.cfg.start_new_block(), START_BLOCK);
@@ -768,9 +775,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
closure_ty = *ty;
}
- let upvar_substs = match closure_ty.kind() {
- ty::Closure(_, substs) => ty::UpvarSubsts::Closure(substs),
- ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs),
+ let upvar_args = match closure_ty.kind() {
+ ty::Closure(_, args) => ty::UpvarArgs::Closure(args),
+ ty::Generator(_, args, _) => ty::UpvarArgs::Generator(args),
_ => return,
};
@@ -779,7 +786,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// with the closure's DefId. Here, we run through that vec of UpvarIds for
// the given closure and use the necessary information to create upvar
// debuginfo and to fill `self.upvars`.
- let capture_tys = upvar_substs.upvar_tys();
+ let capture_tys = upvar_args.upvar_tys();
let tcx = self.tcx;
self.upvars = tcx
@@ -813,7 +820,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
};
self.var_debug_info.push(VarDebugInfo {
name,
- references: 0,
source_info: SourceInfo::outermost(captured_place.var_ident.span),
value: VarDebugInfoContents::Place(use_place),
argument_index: None,
@@ -844,7 +850,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
self.var_debug_info.push(VarDebugInfo {
name,
source_info,
- references: 0,
value: VarDebugInfoContents::Place(arg_local.into()),
argument_index: Some(argument_index as u16 + 1),
});
@@ -969,9 +974,9 @@ pub(crate) fn parse_float_into_scalar(
match float_ty {
ty::FloatTy::F32 => {
let Ok(rust_f) = num.parse::<f32>() else { return None };
- let mut f = num.parse::<Single>().unwrap_or_else(|e| {
- panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e)
- });
+ let mut f = num
+ .parse::<Single>()
+ .unwrap_or_else(|e| panic!("apfloat::ieee::Single failed to parse `{num}`: {e:?}"));
assert!(
u128::from(rust_f.to_bits()) == f.to_bits(),
@@ -992,9 +997,9 @@ pub(crate) fn parse_float_into_scalar(
}
ty::FloatTy::F64 => {
let Ok(rust_f) = num.parse::<f64>() else { return None };
- let mut f = num.parse::<Double>().unwrap_or_else(|e| {
- panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e)
- });
+ let mut f = num
+ .parse::<Double>()
+ .unwrap_or_else(|e| panic!("apfloat::ieee::Double failed to parse `{num}`: {e:?}"));
assert!(
u128::from(rust_f.to_bits()) == f.to_bits(),
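These two hunks only inline the format arguments of the panic messages; the assertions visible in the context lines compare `rust_f.to_bits()` against the rustc_apfloat result bit for bit. A small standalone illustration (std only, not part of the patch) of why the comparison is done on bit patterns rather than with `==`:

```rust
// Value equality on floats hides representation differences: -0.0 == 0.0 and
// NaN != NaN, while `to_bits` exposes the exact IEEE-754 encoding that the
// parser assertions above care about.
fn main() {
    let pos = 0.0_f32;
    let neg = -0.0_f32;
    assert!(pos == neg); // equal as values
    assert_ne!(pos.to_bits(), neg.to_bits()); // different bit patterns
    println!("0.0 -> {:#010x}, -0.0 -> {:#010x}", pos.to_bits(), neg.to_bits());
}
```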
diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs
index 72374102c..a96288a11 100644
--- a/compiler/rustc_mir_build/src/build/scope.rs
+++ b/compiler/rustc_mir_build/src/build/scope.rs
@@ -90,8 +90,8 @@ use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::middle::region;
use rustc_middle::mir::*;
use rustc_middle::thir::{Expr, LintLevel};
-
use rustc_middle::ty::Ty;
+use rustc_session::lint::Level;
use rustc_span::{Span, DUMMY_SP};
#[derive(Debug)]
@@ -760,20 +760,25 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
) {
let (current_root, parent_root) =
if self.tcx.sess.opts.unstable_opts.maximal_hir_to_mir_coverage {
- // Some consumers of rustc need to map MIR locations back to HIR nodes. Currently the
- // the only part of rustc that tracks MIR -> HIR is the `SourceScopeLocalData::lint_root`
- // field that tracks lint levels for MIR locations. Normally the number of source scopes
- // is limited to the set of nodes with lint annotations. The -Zmaximal-hir-to-mir-coverage
- // flag changes this behavior to maximize the number of source scopes, increasing the
- // granularity of the MIR->HIR mapping.
+ // Some consumers of rustc need to map MIR locations back to HIR nodes. Currently
+                // the only part of rustc that tracks MIR -> HIR is the
+ // `SourceScopeLocalData::lint_root` field that tracks lint levels for MIR
+ // locations. Normally the number of source scopes is limited to the set of nodes
+ // with lint annotations. The -Zmaximal-hir-to-mir-coverage flag changes this
+ // behavior to maximize the number of source scopes, increasing the granularity of
+ // the MIR->HIR mapping.
(current_id, parent_id)
} else {
- // Use `maybe_lint_level_root_bounded` with `self.hir_id` as a bound
- // to avoid adding Hir dependencies on our parents.
- // We estimate the true lint roots here to avoid creating a lot of source scopes.
+ // Use `maybe_lint_level_root_bounded` to avoid adding Hir dependencies on our
+ // parents. We estimate the true lint roots here to avoid creating a lot of source
+ // scopes.
(
- self.tcx.maybe_lint_level_root_bounded(current_id, self.hir_id),
- self.tcx.maybe_lint_level_root_bounded(parent_id, self.hir_id),
+ self.maybe_lint_level_root_bounded(current_id),
+ if parent_id == self.hir_id {
+ parent_id // this is very common
+ } else {
+ self.maybe_lint_level_root_bounded(parent_id)
+ },
)
};
@@ -783,6 +788,50 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
+ /// Walks upwards from `orig_id` to find a node which might change lint levels with attributes.
+ /// It stops at `self.hir_id` and just returns it if reached.
+ fn maybe_lint_level_root_bounded(&mut self, orig_id: HirId) -> HirId {
+ // This assertion lets us just store `ItemLocalId` in the cache, rather
+ // than the full `HirId`.
+ assert_eq!(orig_id.owner, self.hir_id.owner);
+
+ let mut id = orig_id;
+ let hir = self.tcx.hir();
+ loop {
+ if id == self.hir_id {
+ // This is a moderately common case, mostly hit for previously unseen nodes.
+ break;
+ }
+
+ if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
+ // This is a rare case. It's for a node path that doesn't reach the root due to an
+ // intervening lint level attribute. This result doesn't get cached.
+ return id;
+ }
+
+ let next = hir.parent_id(id);
+ if next == id {
+ bug!("lint traversal reached the root of the crate");
+ }
+ id = next;
+
+ // This lookup is just an optimization; it can be removed without affecting
+ // functionality. It might seem strange to see this at the end of this loop, but the
+ // `orig_id` passed in to this function is almost always previously unseen, for which a
+ // lookup will be a miss. So we only do lookups for nodes up the parent chain, where
+ // cache lookups have a very high hit rate.
+ if self.lint_level_roots_cache.contains(id.local_id) {
+ break;
+ }
+ }
+
+        // `orig_id` traced to `self.hir_id`; record this fact. If `orig_id` is a leaf node it will
+ // rarely (never?) subsequently be searched for, but it's hard to know if that is the case.
+ // The performance wins from the cache all come from caching non-leaf nodes.
+ self.lint_level_roots_cache.insert(orig_id.local_id);
+ self.hir_id
+ }
+
/// Creates a new source scope, nested in the current one.
pub(crate) fn new_source_scope(
&mut self,
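The `maybe_lint_level_root_bounded` helper added above memoizes a walk up the HIR parent chain: nodes known to reach `self.hir_id` without hitting a lint attribute are recorded in `lint_level_roots_cache`, so later walks can stop early. A rough standalone sketch of the same shape, with a plain parent table and a `HashSet` standing in for the HIR and `GrowableBitSet` (all names and types here are illustrative, not rustc APIs):

```rust
use std::collections::HashSet;

/// Stand-in for the memoized walk: `parent[i]` is the parent of node `i`,
/// `has_attr[i]` says whether node `i` would change lint levels, and
/// `reaches_root` caches nodes already known to trace to `root` cleanly.
struct Walker {
    parent: Vec<usize>,
    has_attr: Vec<bool>,
    reaches_root: HashSet<usize>,
}

impl Walker {
    fn root_bounded(&mut self, start: usize, root: usize) -> usize {
        let mut id = start;
        loop {
            if id == root {
                break;
            }
            if self.has_attr[id] {
                // An intervening attribute: return it and skip caching,
                // mirroring the rare early return in the real helper.
                return id;
            }
            let next = self.parent[id];
            assert_ne!(next, id, "walk reached the top without hitting `root`");
            id = next;
            // The cache is probed only after stepping, because `start` itself
            // is usually a fresh node and would almost always miss.
            if self.reaches_root.contains(&id) {
                break;
            }
        }
        // `start` is now known to trace to `root`; remember that.
        self.reaches_root.insert(start);
        root
    }
}

fn main() {
    // Tree: 0 is the root; 1 and 2 hang off 0; 3 hangs off 1; node 2 carries
    // an attribute.
    let mut w = Walker {
        parent: vec![0, 0, 0, 1],
        has_attr: vec![false, false, true, false],
        reaches_root: HashSet::new(),
    };
    assert_eq!(w.root_bounded(3, 0), 0); // clean path to the root, now cached
    assert_eq!(w.root_bounded(2, 0), 2); // stops at the attributed node
    println!("cached as reaching root: {:?}", w.reaches_root);
}
```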
diff --git a/compiler/rustc_mir_build/src/check_unsafety.rs b/compiler/rustc_mir_build/src/check_unsafety.rs
index 6b2b140fa..192bd4a83 100644
--- a/compiler/rustc_mir_build/src/check_unsafety.rs
+++ b/compiler/rustc_mir_build/src/check_unsafety.rs
@@ -91,7 +91,12 @@ impl<'tcx> UnsafetyVisitor<'_, 'tcx> {
kind.emit_unsafe_op_in_unsafe_fn_lint(self.tcx, self.hir_context, span);
}
SafetyContext::Safe => {
- kind.emit_requires_unsafe_err(self.tcx, span, unsafe_op_in_unsafe_fn_allowed);
+ kind.emit_requires_unsafe_err(
+ self.tcx,
+ span,
+ self.hir_context,
+ unsafe_op_in_unsafe_fn_allowed,
+ );
}
}
}
@@ -383,7 +388,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
ExprKind::Adt(box AdtExpr {
adt_def,
variant_index: _,
- substs: _,
+ args: _,
user_ty: _,
fields: _,
base: _,
@@ -393,14 +398,14 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
},
ExprKind::Closure(box ClosureExpr {
closure_id,
- substs: _,
+ args: _,
upvars: _,
movability: _,
fake_reads: _,
}) => {
self.visit_inner_body(closure_id);
}
- ExprKind::ConstBlock { did, substs: _ } => {
+ ExprKind::ConstBlock { did, args: _ } => {
let def_id = did.expect_local();
self.visit_inner_body(def_id);
}
@@ -602,98 +607,164 @@ impl UnsafeOpKind {
&self,
tcx: TyCtxt<'_>,
span: Span,
+ hir_context: hir::HirId,
unsafe_op_in_unsafe_fn_allowed: bool,
) {
+ let note_non_inherited = tcx.hir().parent_iter(hir_context).find(|(id, node)| {
+ if let hir::Node::Expr(block) = node
+ && let hir::ExprKind::Block(block, _) = block.kind
+ && let hir::BlockCheckMode::UnsafeBlock(_) = block.rules
+ {
+ true
+ }
+ else if let Some(sig) = tcx.hir().fn_sig_by_hir_id(*id)
+ && sig.header.is_unsafe()
+ {
+ true
+ } else {
+ false
+ }
+ });
+ let unsafe_not_inherited_note = if let Some((id, _)) = note_non_inherited {
+ let span = tcx.hir().span(id);
+ let span = tcx.sess.source_map().guess_head_span(span);
+ Some(UnsafeNotInheritedNote { span })
+ } else {
+ None
+ };
+
match self {
CallToUnsafeFunction(Some(did)) if unsafe_op_in_unsafe_fn_allowed => {
tcx.sess.emit_err(CallToUnsafeFunctionRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
span,
+ unsafe_not_inherited_note,
function: &tcx.def_path_str(*did),
});
}
CallToUnsafeFunction(Some(did)) => {
tcx.sess.emit_err(CallToUnsafeFunctionRequiresUnsafe {
span,
+ unsafe_not_inherited_note,
function: &tcx.def_path_str(*did),
});
}
CallToUnsafeFunction(None) if unsafe_op_in_unsafe_fn_allowed => {
tcx.sess.emit_err(
- CallToUnsafeFunctionRequiresUnsafeNamelessUnsafeOpInUnsafeFnAllowed { span },
+ CallToUnsafeFunctionRequiresUnsafeNamelessUnsafeOpInUnsafeFnAllowed {
+ span,
+ unsafe_not_inherited_note,
+ },
);
}
CallToUnsafeFunction(None) => {
- tcx.sess.emit_err(CallToUnsafeFunctionRequiresUnsafeNameless { span });
+ tcx.sess.emit_err(CallToUnsafeFunctionRequiresUnsafeNameless {
+ span,
+ unsafe_not_inherited_note,
+ });
}
UseOfInlineAssembly if unsafe_op_in_unsafe_fn_allowed => {
- tcx.sess
- .emit_err(UseOfInlineAssemblyRequiresUnsafeUnsafeOpInUnsafeFnAllowed { span });
+ tcx.sess.emit_err(UseOfInlineAssemblyRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
+ span,
+ unsafe_not_inherited_note,
+ });
}
UseOfInlineAssembly => {
- tcx.sess.emit_err(UseOfInlineAssemblyRequiresUnsafe { span });
+ tcx.sess.emit_err(UseOfInlineAssemblyRequiresUnsafe {
+ span,
+ unsafe_not_inherited_note,
+ });
}
InitializingTypeWith if unsafe_op_in_unsafe_fn_allowed => {
- tcx.sess
- .emit_err(InitializingTypeWithRequiresUnsafeUnsafeOpInUnsafeFnAllowed { span });
+ tcx.sess.emit_err(InitializingTypeWithRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
+ span,
+ unsafe_not_inherited_note,
+ });
}
InitializingTypeWith => {
- tcx.sess.emit_err(InitializingTypeWithRequiresUnsafe { span });
+ tcx.sess.emit_err(InitializingTypeWithRequiresUnsafe {
+ span,
+ unsafe_not_inherited_note,
+ });
}
UseOfMutableStatic if unsafe_op_in_unsafe_fn_allowed => {
- tcx.sess
- .emit_err(UseOfMutableStaticRequiresUnsafeUnsafeOpInUnsafeFnAllowed { span });
+ tcx.sess.emit_err(UseOfMutableStaticRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
+ span,
+ unsafe_not_inherited_note,
+ });
}
UseOfMutableStatic => {
- tcx.sess.emit_err(UseOfMutableStaticRequiresUnsafe { span });
+ tcx.sess
+ .emit_err(UseOfMutableStaticRequiresUnsafe { span, unsafe_not_inherited_note });
}
UseOfExternStatic if unsafe_op_in_unsafe_fn_allowed => {
- tcx.sess
- .emit_err(UseOfExternStaticRequiresUnsafeUnsafeOpInUnsafeFnAllowed { span });
+ tcx.sess.emit_err(UseOfExternStaticRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
+ span,
+ unsafe_not_inherited_note,
+ });
}
UseOfExternStatic => {
- tcx.sess.emit_err(UseOfExternStaticRequiresUnsafe { span });
+ tcx.sess
+ .emit_err(UseOfExternStaticRequiresUnsafe { span, unsafe_not_inherited_note });
}
DerefOfRawPointer if unsafe_op_in_unsafe_fn_allowed => {
- tcx.sess
- .emit_err(DerefOfRawPointerRequiresUnsafeUnsafeOpInUnsafeFnAllowed { span });
+ tcx.sess.emit_err(DerefOfRawPointerRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
+ span,
+ unsafe_not_inherited_note,
+ });
}
DerefOfRawPointer => {
- tcx.sess.emit_err(DerefOfRawPointerRequiresUnsafe { span });
+ tcx.sess
+ .emit_err(DerefOfRawPointerRequiresUnsafe { span, unsafe_not_inherited_note });
}
AccessToUnionField if unsafe_op_in_unsafe_fn_allowed => {
- tcx.sess
- .emit_err(AccessToUnionFieldRequiresUnsafeUnsafeOpInUnsafeFnAllowed { span });
+ tcx.sess.emit_err(AccessToUnionFieldRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
+ span,
+ unsafe_not_inherited_note,
+ });
}
AccessToUnionField => {
- tcx.sess.emit_err(AccessToUnionFieldRequiresUnsafe { span });
+ tcx.sess
+ .emit_err(AccessToUnionFieldRequiresUnsafe { span, unsafe_not_inherited_note });
}
MutationOfLayoutConstrainedField if unsafe_op_in_unsafe_fn_allowed => {
tcx.sess.emit_err(
MutationOfLayoutConstrainedFieldRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
span,
+ unsafe_not_inherited_note,
},
);
}
MutationOfLayoutConstrainedField => {
- tcx.sess.emit_err(MutationOfLayoutConstrainedFieldRequiresUnsafe { span });
+ tcx.sess.emit_err(MutationOfLayoutConstrainedFieldRequiresUnsafe {
+ span,
+ unsafe_not_inherited_note,
+ });
}
BorrowOfLayoutConstrainedField if unsafe_op_in_unsafe_fn_allowed => {
tcx.sess.emit_err(
- BorrowOfLayoutConstrainedFieldRequiresUnsafeUnsafeOpInUnsafeFnAllowed { span },
+ BorrowOfLayoutConstrainedFieldRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
+ span,
+ unsafe_not_inherited_note,
+ },
);
}
BorrowOfLayoutConstrainedField => {
- tcx.sess.emit_err(BorrowOfLayoutConstrainedFieldRequiresUnsafe { span });
+ tcx.sess.emit_err(BorrowOfLayoutConstrainedFieldRequiresUnsafe {
+ span,
+ unsafe_not_inherited_note,
+ });
}
CallToFunctionWith(did) if unsafe_op_in_unsafe_fn_allowed => {
tcx.sess.emit_err(CallToFunctionWithRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
span,
+ unsafe_not_inherited_note,
function: &tcx.def_path_str(*did),
});
}
CallToFunctionWith(did) => {
tcx.sess.emit_err(CallToFunctionWithRequiresUnsafe {
span,
+ unsafe_not_inherited_note,
function: &tcx.def_path_str(*did),
});
}
@@ -712,9 +783,7 @@ pub fn thir_check_unsafety(tcx: TyCtxt<'_>, def: LocalDefId) {
return;
}
- let Ok((thir, expr)) = tcx.thir_body(def) else {
- return
- };
+ let Ok((thir, expr)) = tcx.thir_body(def) else { return };
let thir = &thir.borrow();
// If `thir` is empty, a type error occurred, skip this body.
if thir.exprs.is_empty() {
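The new `unsafe_not_inherited_note` above is built by walking the enclosing HIR (via `parent_iter`) for an `unsafe` block or an `unsafe fn` header and labelling it when found. A hypothetical snippet of the situation the note is aimed at; this deliberately fails to compile, and the comments indicate roughly where the new label would point:

```rust
unsafe fn unsafe_op() {}

fn caller() {
    unsafe {
        // A nested item does not inherit unsafety from this enclosing block,
        // so the call below is still rejected; with this change the error can
        // carry a note pointing back at the `unsafe {` above to explain why
        // it does not apply here.
        fn nested() {
            unsafe_op(); // ERROR: call to unsafe function requires an unsafe block
        }
        nested();
    }
}
```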
diff --git a/compiler/rustc_mir_build/src/errors.rs b/compiler/rustc_mir_build/src/errors.rs
index df00cc75c..3ff3387a7 100644
--- a/compiler/rustc_mir_build/src/errors.rs
+++ b/compiler/rustc_mir_build/src/errors.rs
@@ -119,6 +119,8 @@ pub struct CallToUnsafeFunctionRequiresUnsafe<'a> {
#[label]
pub span: Span,
pub function: &'a str,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -128,6 +130,8 @@ pub struct CallToUnsafeFunctionRequiresUnsafeNameless {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -138,6 +142,8 @@ pub struct CallToUnsafeFunctionRequiresUnsafeUnsafeOpInUnsafeFnAllowed<'a> {
#[label]
pub span: Span,
pub function: &'a str,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -150,6 +156,8 @@ pub struct CallToUnsafeFunctionRequiresUnsafeNamelessUnsafeOpInUnsafeFnAllowed {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -159,6 +167,8 @@ pub struct UseOfInlineAssemblyRequiresUnsafe {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -168,6 +178,8 @@ pub struct UseOfInlineAssemblyRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -177,6 +189,8 @@ pub struct InitializingTypeWithRequiresUnsafe {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -189,6 +203,8 @@ pub struct InitializingTypeWithRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -198,6 +214,8 @@ pub struct UseOfMutableStaticRequiresUnsafe {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -207,6 +225,8 @@ pub struct UseOfMutableStaticRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -216,6 +236,8 @@ pub struct UseOfExternStaticRequiresUnsafe {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -225,6 +247,8 @@ pub struct UseOfExternStaticRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -234,6 +258,8 @@ pub struct DerefOfRawPointerRequiresUnsafe {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -243,6 +269,8 @@ pub struct DerefOfRawPointerRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -252,6 +280,8 @@ pub struct AccessToUnionFieldRequiresUnsafe {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -261,6 +291,8 @@ pub struct AccessToUnionFieldRequiresUnsafeUnsafeOpInUnsafeFnAllowed {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -270,6 +302,8 @@ pub struct MutationOfLayoutConstrainedFieldRequiresUnsafe {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -282,6 +316,8 @@ pub struct MutationOfLayoutConstrainedFieldRequiresUnsafeUnsafeOpInUnsafeFnAllow
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -291,6 +327,8 @@ pub struct BorrowOfLayoutConstrainedFieldRequiresUnsafe {
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -303,6 +341,8 @@ pub struct BorrowOfLayoutConstrainedFieldRequiresUnsafeUnsafeOpInUnsafeFnAllowed
#[primary_span]
#[label]
pub span: Span,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -313,6 +353,8 @@ pub struct CallToFunctionWithRequiresUnsafe<'a> {
#[label]
pub span: Span,
pub function: &'a str,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
}
#[derive(Diagnostic)]
@@ -323,6 +365,15 @@ pub struct CallToFunctionWithRequiresUnsafeUnsafeOpInUnsafeFnAllowed<'a> {
#[label]
pub span: Span,
pub function: &'a str,
+ #[subdiagnostic]
+ pub unsafe_not_inherited_note: Option<UnsafeNotInheritedNote>,
+}
+
+#[derive(Subdiagnostic)]
+#[label(mir_build_unsafe_not_inherited)]
+pub struct UnsafeNotInheritedNote {
+ #[primary_span]
+ pub span: Span,
}
#[derive(LintDiagnostic)]
@@ -403,17 +454,13 @@ impl<'a> IntoDiagnostic<'a> for NonExhaustivePatternsTypeNotEmpty<'_, '_, '_> {
if self.span.eq_ctxt(self.expr_span) {
// Get the span for the empty match body `{}`.
let (indentation, more) = if let Some(snippet) = sm.indentation_before(self.span) {
- (format!("\n{}", snippet), " ")
+ (format!("\n{snippet}"), " ")
} else {
(" ".to_string(), "")
};
suggestion = Some((
self.span.shrink_to_hi().with_hi(self.expr_span.hi()),
- format!(
- " {{{indentation}{more}_ => todo!(),{indentation}}}",
- indentation = indentation,
- more = more,
- ),
+ format!(" {{{indentation}{more}_ => todo!(),{indentation}}}",),
));
}
diff --git a/compiler/rustc_mir_build/src/lib.rs b/compiler/rustc_mir_build/src/lib.rs
index 4fdc3178c..099fefbf0 100644
--- a/compiler/rustc_mir_build/src/lib.rs
+++ b/compiler/rustc_mir_build/src/lib.rs
@@ -19,7 +19,7 @@ extern crate rustc_middle;
mod build;
mod check_unsafety;
mod errors;
-mod lints;
+pub mod lints;
pub mod thir;
use rustc_middle::query::Providers;
diff --git a/compiler/rustc_mir_build/src/lints.rs b/compiler/rustc_mir_build/src/lints.rs
index 8e41957af..7fb73b5c7 100644
--- a/compiler/rustc_mir_build/src/lints.rs
+++ b/compiler/rustc_mir_build/src/lints.rs
@@ -3,27 +3,43 @@ use rustc_data_structures::graph::iterate::{
NodeStatus, TriColorDepthFirstSearch, TriColorVisitor,
};
use rustc_hir::def::DefKind;
-use rustc_middle::mir::{self, BasicBlock, BasicBlocks, Body, Operand, TerminatorKind};
-use rustc_middle::ty::subst::{GenericArg, InternalSubsts};
-use rustc_middle::ty::{self, Instance, TyCtxt};
+use rustc_middle::mir::{self, BasicBlock, BasicBlocks, Body, Terminator, TerminatorKind};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::ty::{GenericArg, GenericArgs};
use rustc_session::lint::builtin::UNCONDITIONAL_RECURSION;
use rustc_span::Span;
use std::ops::ControlFlow;
pub(crate) fn check<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ check_call_recursion(tcx, body);
+}
+
+fn check_call_recursion<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
let def_id = body.source.def_id().expect_local();
if let DefKind::Fn | DefKind::AssocFn = tcx.def_kind(def_id) {
- // If this is trait/impl method, extract the trait's substs.
- let trait_substs = match tcx.trait_of_item(def_id.to_def_id()) {
+        // If this is a trait/impl method, extract the trait's args.
+ let trait_args = match tcx.trait_of_item(def_id.to_def_id()) {
Some(trait_def_id) => {
- let trait_substs_count = tcx.generics_of(trait_def_id).count();
- &InternalSubsts::identity_for_item(tcx, def_id)[..trait_substs_count]
+ let trait_args_count = tcx.generics_of(trait_def_id).count();
+ &GenericArgs::identity_for_item(tcx, def_id)[..trait_args_count]
}
_ => &[],
};
- let mut vis = Search { tcx, body, reachable_recursive_calls: vec![], trait_substs };
+ check_recursion(tcx, body, CallRecursion { trait_args })
+ }
+}
+
+fn check_recursion<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ classifier: impl TerminatorClassifier<'tcx>,
+) {
+ let def_id = body.source.def_id().expect_local();
+
+ if let DefKind::Fn | DefKind::AssocFn = tcx.def_kind(def_id) {
+ let mut vis = Search { tcx, body, classifier, reachable_recursive_calls: vec![] };
if let Some(NonRecursive) =
TriColorDepthFirstSearch::new(&body.basic_blocks).run_from_start(&mut vis)
{
@@ -46,20 +62,66 @@ pub(crate) fn check<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
}
}
+/// Requires drop elaboration to have been performed first.
+pub fn check_drop_recursion<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ let def_id = body.source.def_id().expect_local();
+
+ // First check if `body` is an `fn drop()` of `Drop`
+ if let DefKind::AssocFn = tcx.def_kind(def_id) &&
+ let Some(trait_ref) = tcx.impl_of_method(def_id.to_def_id()).and_then(|def_id| tcx.impl_trait_ref(def_id)) &&
+ let Some(drop_trait) = tcx.lang_items().drop_trait() && drop_trait == trait_ref.instantiate_identity().def_id {
+
+ // It was. Now figure out for what type `Drop` is implemented and then
+ // check for recursion.
+ if let ty::Ref(_, dropped_ty, _) = tcx.liberate_late_bound_regions(
+ def_id.to_def_id(),
+ tcx.fn_sig(def_id).instantiate_identity().input(0),
+ ).kind() {
+ check_recursion(tcx, body, RecursiveDrop { drop_for: *dropped_ty });
+ }
+ }
+}
+
+trait TerminatorClassifier<'tcx> {
+ fn is_recursive_terminator(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ terminator: &Terminator<'tcx>,
+ ) -> bool;
+}
+
struct NonRecursive;
-struct Search<'mir, 'tcx> {
+struct Search<'mir, 'tcx, C: TerminatorClassifier<'tcx>> {
tcx: TyCtxt<'tcx>,
body: &'mir Body<'tcx>,
- trait_substs: &'tcx [GenericArg<'tcx>],
+ classifier: C,
reachable_recursive_calls: Vec<Span>,
}
-impl<'mir, 'tcx> Search<'mir, 'tcx> {
+struct CallRecursion<'tcx> {
+ trait_args: &'tcx [GenericArg<'tcx>],
+}
+
+struct RecursiveDrop<'tcx> {
+ /// The type that `Drop` is implemented for.
+ drop_for: Ty<'tcx>,
+}
+
+impl<'tcx> TerminatorClassifier<'tcx> for CallRecursion<'tcx> {
/// Returns `true` if `func` refers to the function we are searching in.
- fn is_recursive_call(&self, func: &Operand<'tcx>, args: &[Operand<'tcx>]) -> bool {
- let Search { tcx, body, trait_substs, .. } = *self;
+ fn is_recursive_terminator(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ terminator: &Terminator<'tcx>,
+ ) -> bool {
+ let TerminatorKind::Call { func, args, .. } = &terminator.kind else {
+ return false;
+ };
+
// Resolving function type to a specific instance that is being called is expensive. To
// avoid the cost we check the number of arguments first, which is sufficient to reject
// most calls as non-recursive.
@@ -70,30 +132,46 @@ impl<'mir, 'tcx> Search<'mir, 'tcx> {
let param_env = tcx.param_env(caller);
let func_ty = func.ty(body, tcx);
- if let ty::FnDef(callee, substs) = *func_ty.kind() {
- let normalized_substs = tcx.normalize_erasing_regions(param_env, substs);
- let (callee, call_substs) = if let Ok(Some(instance)) =
- Instance::resolve(tcx, param_env, callee, normalized_substs)
+ if let ty::FnDef(callee, args) = *func_ty.kind() {
+ let normalized_args = tcx.normalize_erasing_regions(param_env, args);
+ let (callee, call_args) = if let Ok(Some(instance)) =
+ Instance::resolve(tcx, param_env, callee, normalized_args)
{
- (instance.def_id(), instance.substs)
+ (instance.def_id(), instance.args)
} else {
- (callee, normalized_substs)
+ (callee, normalized_args)
};
// FIXME(#57965): Make this work across function boundaries
- // If this is a trait fn, the substs on the trait have to match, or we might be
+ // If this is a trait fn, the args on the trait have to match, or we might be
// calling into an entirely different method (for example, a call from the default
// method in the trait to `<A as Trait<B>>::method`, where `A` and/or `B` are
// specific types).
- return callee == caller && &call_substs[..trait_substs.len()] == trait_substs;
+ return callee == caller && &call_args[..self.trait_args.len()] == self.trait_args;
}
false
}
}
-impl<'mir, 'tcx> TriColorVisitor<BasicBlocks<'tcx>> for Search<'mir, 'tcx> {
+impl<'tcx> TerminatorClassifier<'tcx> for RecursiveDrop<'tcx> {
+ fn is_recursive_terminator(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ terminator: &Terminator<'tcx>,
+ ) -> bool {
+ let TerminatorKind::Drop { place, .. } = &terminator.kind else { return false };
+
+ let dropped_ty = place.ty(body, tcx).ty;
+ dropped_ty == self.drop_for
+ }
+}
+
+impl<'mir, 'tcx, C: TerminatorClassifier<'tcx>> TriColorVisitor<BasicBlocks<'tcx>>
+ for Search<'mir, 'tcx, C>
+{
type BreakVal = NonRecursive;
fn node_examined(
@@ -138,10 +216,8 @@ impl<'mir, 'tcx> TriColorVisitor<BasicBlocks<'tcx>> for Search<'mir, 'tcx> {
fn node_settled(&mut self, bb: BasicBlock) -> ControlFlow<Self::BreakVal> {
// When we examine a node for the last time, remember it if it is a recursive call.
let terminator = self.body[bb].terminator();
- if let TerminatorKind::Call { func, args, .. } = &terminator.kind {
- if self.is_recursive_call(func, args) {
- self.reachable_recursive_calls.push(terminator.source_info.span);
- }
+ if self.classifier.is_recursive_terminator(self.tcx, self.body, terminator) {
+ self.reachable_recursive_calls.push(terminator.source_info.span);
}
ControlFlow::Continue(())
@@ -149,15 +225,14 @@ impl<'mir, 'tcx> TriColorVisitor<BasicBlocks<'tcx>> for Search<'mir, 'tcx> {
fn ignore_edge(&mut self, bb: BasicBlock, target: BasicBlock) -> bool {
let terminator = self.body[bb].terminator();
- if terminator.unwind() == Some(&mir::UnwindAction::Cleanup(target))
- && terminator.successors().count() > 1
+ let ignore_unwind = terminator.unwind() == Some(&mir::UnwindAction::Cleanup(target))
+ && terminator.successors().count() > 1;
+ if ignore_unwind || self.classifier.is_recursive_terminator(self.tcx, self.body, terminator)
{
return true;
}
- // Don't traverse successors of recursive calls or false CFG edges.
- match self.body[bb].terminator().kind {
- TerminatorKind::Call { ref func, ref args, .. } => self.is_recursive_call(func, args),
- TerminatorKind::FalseEdge { imaginary_target, .. } => imaginary_target == target,
+ match &terminator.kind {
+ TerminatorKind::FalseEdge { imaginary_target, .. } => imaginary_target == &target,
_ => false,
}
}
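With the `TerminatorClassifier` split above, the existing call-recursion check and the new `check_drop_recursion` share one search; the drop classifier flags `Drop` terminators whose dropped type is the type the `Drop` impl is for. A hypothetical example of the pattern that classifier is meant to catch (constructing a fresh value of the same type inside its own `drop`, so the drop glue re-enters on every call):

```rust
struct Chunk(Vec<u8>);

impl Drop for Chunk {
    fn drop(&mut self) {
        // The temporary below is itself a `Chunk`, so it is dropped at the end
        // of this function and `Chunk::drop` runs again, unconditionally. After
        // drop elaboration this is a `Drop` terminator whose dropped type
        // matches the type the impl is for, which the new classifier flags
        // (and which would overflow the stack if ever executed).
        let _tmp = Chunk(Vec::new());
    }
}

fn main() {
    // Nothing constructs a `Chunk` here on purpose; compiling the impl above
    // should already be enough to trigger the `unconditional_recursion`
    // warning once this drop-recursion check is in place.
}
```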
diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs
index 37537683f..6c1f7d7a6 100644
--- a/compiler/rustc_mir_build/src/thir/cx/expr.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs
@@ -15,9 +15,9 @@ use rustc_middle::thir::*;
use rustc_middle::ty::adjustment::{
Adjust, Adjustment, AutoBorrow, AutoBorrowMutability, PointerCoercion,
};
-use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{
- self, AdtKind, InlineConstSubsts, InlineConstSubstsParts, ScalarInt, Ty, UpvarSubsts, UserType,
+ self, AdtKind, InlineConstArgs, InlineConstArgsParts, ScalarInt, Ty, UpvarArgs, UserType,
};
use rustc_span::{sym, Span};
use rustc_target::abi::{FieldIdx, FIRST_VARIANT};
@@ -41,11 +41,6 @@ impl<'tcx> Cx<'tcx> {
let mut expr = self.make_mirror_unadjusted(hir_expr);
- let adjustment_span = match self.adjustment_span {
- Some((hir_id, span)) if hir_id == hir_expr.hir_id => Some(span),
- _ => None,
- };
-
trace!(?expr.ty);
// Now apply adjustments, if any.
@@ -53,12 +48,7 @@ impl<'tcx> Cx<'tcx> {
for adjustment in self.typeck_results.expr_adjustments(hir_expr) {
trace!(?expr, ?adjustment);
let span = expr.span;
- expr = self.apply_adjustment(
- hir_expr,
- expr,
- adjustment,
- adjustment_span.unwrap_or(span),
- );
+ expr = self.apply_adjustment(hir_expr, expr, adjustment, span);
}
}
@@ -220,7 +210,7 @@ impl<'tcx> Cx<'tcx> {
let res = self.typeck_results().qpath_res(qpath, source.hir_id);
let ty = self.typeck_results().node_type(source.hir_id);
- let ty::Adt(adt_def, substs) = ty.kind() else {
+ let ty::Adt(adt_def, args) = ty.kind() else {
return ExprKind::Cast { source: self.mirror_expr(source) };
};
@@ -239,9 +229,7 @@ impl<'tcx> Cx<'tcx> {
let param_env_ty = self.param_env.and(discr_ty);
let size = tcx
.layout_of(param_env_ty)
- .unwrap_or_else(|e| {
- panic!("could not compute layout for {:?}: {:?}", param_env_ty, e)
- })
+ .unwrap_or_else(|e| panic!("could not compute layout for {param_env_ty:?}: {e:?}"))
.size;
let lit = ScalarInt::try_from_uint(discr_offset as u128, size).unwrap();
@@ -252,7 +240,7 @@ impl<'tcx> Cx<'tcx> {
// in case we are offsetting from a computed discriminant
// and not the beginning of discriminants (which is always `0`)
Some(did) => {
- let kind = ExprKind::NamedConst { def_id: did, substs, user_ty: None };
+ let kind = ExprKind::NamedConst { def_id: did, args, user_ty: None };
let lhs =
self.thir.exprs.push(Expr { temp_lifetime, ty: discr_ty, span, kind });
let bin = ExprKind::Binary { op: BinOp::Add, lhs, rhs: offset };
@@ -274,7 +262,6 @@ impl<'tcx> Cx<'tcx> {
fn make_mirror_unadjusted(&mut self, expr: &'tcx hir::Expr<'tcx>) -> Expr<'tcx> {
let tcx = self.tcx;
let expr_ty = self.typeck_results().expr_ty(expr);
- let expr_span = expr.span;
let temp_lifetime =
self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id);
@@ -283,17 +270,11 @@ impl<'tcx> Cx<'tcx> {
hir::ExprKind::MethodCall(segment, receiver, ref args, fn_span) => {
// Rewrite a.b(c) into UFCS form like Trait::b(a, c)
let expr = self.method_callee(expr, segment.ident.span, None);
- // When we apply adjustments to the receiver, use the span of
- // the overall method call for better diagnostics. args[0]
- // is guaranteed to exist, since a method call always has a receiver.
- let old_adjustment_span =
- self.adjustment_span.replace((receiver.hir_id, expr_span));
info!("Using method span: {:?}", expr.span);
let args = std::iter::once(receiver)
.chain(args.iter())
.map(|expr| self.mirror_expr(expr))
.collect();
- self.adjustment_span = old_adjustment_span;
ExprKind::Call {
ty: expr.ty,
fun: self.thir.exprs.push(expr),
@@ -389,7 +370,7 @@ impl<'tcx> Cx<'tcx> {
None
};
if let Some((adt_def, index)) = adt_data {
- let substs = self.typeck_results().node_substs(fun.hir_id);
+ let node_args = self.typeck_results().node_args(fun.hir_id);
let user_provided_types = self.typeck_results().user_provided_types();
let user_ty =
user_provided_types.get(fun.hir_id).copied().map(|mut u_ty| {
@@ -410,7 +391,7 @@ impl<'tcx> Cx<'tcx> {
.collect();
ExprKind::Adt(Box::new(AdtExpr {
adt_def,
- substs,
+ args: node_args,
variant_index: index,
fields: field_refs,
user_ty,
@@ -464,7 +445,6 @@ impl<'tcx> Cx<'tcx> {
let rhs = self.mirror_expr(rhs);
self.overloaded_operator(expr, Box::new([lhs, rhs]))
} else {
- // FIXME overflow
match op.node {
hir::BinOpKind::And => ExprKind::LogicalOp {
op: LogicalOp::And,
@@ -488,11 +468,17 @@ impl<'tcx> Cx<'tcx> {
}
}
- hir::ExprKind::Index(ref lhs, ref index) => {
+ hir::ExprKind::Index(ref lhs, ref index, brackets_span) => {
if self.typeck_results().is_method_call(expr) {
let lhs = self.mirror_expr(lhs);
let index = self.mirror_expr(index);
- self.overloaded_place(expr, expr_ty, None, Box::new([lhs, index]), expr.span)
+ self.overloaded_place(
+ expr,
+ expr_ty,
+ None,
+ Box::new([lhs, index]),
+ brackets_span,
+ )
} else {
ExprKind::Index { lhs: self.mirror_expr(lhs), index: self.mirror_expr(index) }
}
@@ -528,7 +514,7 @@ impl<'tcx> Cx<'tcx> {
}
hir::ExprKind::Struct(ref qpath, ref fields, ref base) => match expr_ty.kind() {
- ty::Adt(adt, substs) => match adt.adt_kind() {
+ ty::Adt(adt, args) => match adt.adt_kind() {
AdtKind::Struct | AdtKind::Union => {
let user_provided_types = self.typeck_results().user_provided_types();
let user_ty = user_provided_types.get(expr.hir_id).copied().map(Box::new);
@@ -536,7 +522,7 @@ impl<'tcx> Cx<'tcx> {
ExprKind::Adt(Box::new(AdtExpr {
adt_def: *adt,
variant_index: FIRST_VARIANT,
- substs,
+ args,
user_ty,
fields: self.field_refs(fields),
base: base.map(|base| FruInfo {
@@ -563,7 +549,7 @@ impl<'tcx> Cx<'tcx> {
ExprKind::Adt(Box::new(AdtExpr {
adt_def: *adt,
variant_index: index,
- substs,
+ args,
user_ty,
fields: self.field_refs(fields),
base: None,
@@ -582,10 +568,10 @@ impl<'tcx> Cx<'tcx> {
hir::ExprKind::Closure { .. } => {
let closure_ty = self.typeck_results().expr_ty(expr);
- let (def_id, substs, movability) = match *closure_ty.kind() {
- ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs), None),
- ty::Generator(def_id, substs, movability) => {
- (def_id, UpvarSubsts::Generator(substs), Some(movability))
+ let (def_id, args, movability) = match *closure_ty.kind() {
+ ty::Closure(def_id, args) => (def_id, UpvarArgs::Closure(args), None),
+ ty::Generator(def_id, args, movability) => {
+ (def_id, UpvarArgs::Generator(args), Some(movability))
}
_ => {
span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty);
@@ -597,7 +583,7 @@ impl<'tcx> Cx<'tcx> {
.tcx
.closure_captures(def_id)
.iter()
- .zip(substs.upvar_tys())
+ .zip(args.upvar_tys())
.map(|(captured_place, ty)| {
let upvars = self.capture_upvar(expr, captured_place, ty);
self.thir.exprs.push(upvars)
@@ -618,7 +604,7 @@ impl<'tcx> Cx<'tcx> {
ExprKind::Closure(Box::new(ClosureExpr {
closure_id: def_id,
- substs,
+ args,
upvars,
movability,
fake_reads,
@@ -701,13 +687,11 @@ impl<'tcx> Cx<'tcx> {
let ty = self.typeck_results().node_type(anon_const.hir_id);
let did = anon_const.def_id.to_def_id();
let typeck_root_def_id = tcx.typeck_root_def_id(did);
- let parent_substs =
- tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
- let substs =
- InlineConstSubsts::new(tcx, InlineConstSubstsParts { parent_substs, ty })
- .substs;
+ let parent_args =
+ tcx.erase_regions(GenericArgs::identity_for_item(tcx, typeck_root_def_id));
+ let args = InlineConstArgs::new(tcx, InlineConstArgsParts { parent_args, ty }).args;
- ExprKind::ConstBlock { did, substs }
+ ExprKind::ConstBlock { did, args }
}
// Now comes the rote stuff:
hir::ExprKind::Repeat(ref v, _) => {
@@ -748,6 +732,7 @@ impl<'tcx> Cx<'tcx> {
},
hir::ExprKind::Match(ref discr, ref arms, _) => ExprKind::Match {
scrutinee: self.mirror_expr(discr),
+ scrutinee_hir_id: discr.hir_id,
arms: arms.iter().map(|a| self.convert_arm(a)).collect(),
},
hir::ExprKind::Loop(ref body, ..) => {
@@ -826,12 +811,12 @@ impl<'tcx> Cx<'tcx> {
Expr { temp_lifetime, ty: expr_ty, span: expr.span, kind }
}
- fn user_substs_applied_to_res(
+ fn user_args_applied_to_res(
&mut self,
hir_id: hir::HirId,
res: Res,
) -> Option<Box<ty::CanonicalUserType<'tcx>>> {
- debug!("user_substs_applied_to_res: res={:?}", res);
+ debug!("user_args_applied_to_res: res={:?}", res);
let user_provided_type = match res {
// A reference to something callable -- e.g., a fn, method, or
// a tuple-struct or tuple-variant. This has the type of a
@@ -849,15 +834,15 @@ impl<'tcx> Cx<'tcx> {
// this variant -- but with the substitutions given by the
// user.
Res::Def(DefKind::Ctor(_, CtorKind::Const), _) => {
- self.user_substs_applied_to_ty_of_hir_id(hir_id).map(Box::new)
+ self.user_args_applied_to_ty_of_hir_id(hir_id).map(Box::new)
}
// `Self` is used in expression as a tuple struct constructor or a unit struct constructor
- Res::SelfCtor(_) => self.user_substs_applied_to_ty_of_hir_id(hir_id).map(Box::new),
+ Res::SelfCtor(_) => self.user_args_applied_to_ty_of_hir_id(hir_id).map(Box::new),
- _ => bug!("user_substs_applied_to_res: unexpected res {:?} at {:?}", res, hir_id),
+ _ => bug!("user_args_applied_to_res: unexpected res {:?} at {:?}", res, hir_id),
};
- debug!("user_substs_applied_to_res: user_provided_type={:?}", user_provided_type);
+ debug!("user_args_applied_to_res: user_provided_type={:?}", user_provided_type);
user_provided_type
}
@@ -876,13 +861,13 @@ impl<'tcx> Cx<'tcx> {
self.typeck_results().type_dependent_def(expr.hir_id).unwrap_or_else(|| {
span_bug!(expr.span, "no type-dependent def for method callee")
});
- let user_ty = self.user_substs_applied_to_res(expr.hir_id, Res::Def(kind, def_id));
+ let user_ty = self.user_args_applied_to_res(expr.hir_id, Res::Def(kind, def_id));
debug!("method_callee: user_ty={:?}", user_ty);
(
Ty::new_fn_def(
self.tcx(),
def_id,
- self.typeck_results().node_substs(expr.hir_id),
+ self.typeck_results().node_args(expr.hir_id),
),
user_ty,
)
@@ -909,14 +894,14 @@ impl<'tcx> Cx<'tcx> {
}
fn convert_path_expr(&mut self, expr: &'tcx hir::Expr<'tcx>, res: Res) -> ExprKind<'tcx> {
- let substs = self.typeck_results().node_substs(expr.hir_id);
+ let args = self.typeck_results().node_args(expr.hir_id);
match res {
// A regular function, constructor function or a constant.
Res::Def(DefKind::Fn, _)
| Res::Def(DefKind::AssocFn, _)
| Res::Def(DefKind::Ctor(_, CtorKind::Fn), _)
| Res::SelfCtor(_) => {
- let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
+ let user_ty = self.user_args_applied_to_res(expr.hir_id, res);
ExprKind::ZstLiteral { user_ty }
}
@@ -931,8 +916,8 @@ impl<'tcx> Cx<'tcx> {
}
Res::Def(DefKind::Const, def_id) | Res::Def(DefKind::AssocConst, def_id) => {
- let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
- ExprKind::NamedConst { def_id, substs, user_ty }
+ let user_ty = self.user_args_applied_to_res(expr.hir_id, res);
+ ExprKind::NamedConst { def_id, args, user_ty }
}
Res::Def(DefKind::Ctor(_, CtorKind::Const), def_id) => {
@@ -943,10 +928,10 @@ impl<'tcx> Cx<'tcx> {
match ty.kind() {
// A unit struct/variant which is used as a value.
// We return a completely different ExprKind here to account for this special case.
- ty::Adt(adt_def, substs) => ExprKind::Adt(Box::new(AdtExpr {
+ ty::Adt(adt_def, args) => ExprKind::Adt(Box::new(AdtExpr {
adt_def: *adt_def,
variant_index: adt_def.variant_index_with_ctor_id(def_id),
- substs,
+ args,
user_ty,
fields: Box::new([]),
base: None,
@@ -1093,6 +1078,9 @@ impl<'tcx> Cx<'tcx> {
variant_index,
name: field,
},
+ HirProjectionKind::OpaqueCast => {
+ ExprKind::Use { source: self.thir.exprs.push(captured_place_expr) }
+ }
HirProjectionKind::Index | HirProjectionKind::Subslice => {
// We don't capture these projections, so we can ignore them here
continue;
diff --git a/compiler/rustc_mir_build/src/thir/cx/mod.rs b/compiler/rustc_mir_build/src/thir/cx/mod.rs
index e6a98d1aa..d98cc76ad 100644
--- a/compiler/rustc_mir_build/src/thir/cx/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/mod.rs
@@ -16,7 +16,6 @@ use rustc_hir::Node;
use rustc_middle::middle::region;
use rustc_middle::thir::*;
use rustc_middle::ty::{self, RvalueScopes, Ty, TyCtxt};
-use rustc_span::Span;
pub(crate) fn thir_body(
tcx: TyCtxt<'_>,
@@ -62,14 +61,6 @@ struct Cx<'tcx> {
typeck_results: &'tcx ty::TypeckResults<'tcx>,
rvalue_scopes: &'tcx RvalueScopes,
- /// When applying adjustments to the expression
- /// with the given `HirId`, use the given `Span`,
- /// instead of the usual span. This is used to
- /// assign the span of an overall method call
- /// (e.g. `my_val.foo()`) to the adjustment expressions
- /// for the receiver.
- adjustment_span: Option<(HirId, Span)>,
-
/// False to indicate that adjustments should not be applied. Only used for `custom_mir`
apply_adjustments: bool,
@@ -110,7 +101,6 @@ impl<'tcx> Cx<'tcx> {
typeck_results,
rvalue_scopes: &typeck_results.rvalue_scopes,
body_owner: def.to_def_id(),
- adjustment_span: None,
apply_adjustments: hir
.attrs(hir_id)
.iter()
@@ -132,7 +122,7 @@ impl<'tcx> Cx<'tcx> {
DefKind::Closure => {
let closure_ty = self.typeck_results.node_type(owner_id);
- let ty::Closure(closure_def_id, closure_substs) = *closure_ty.kind() else {
+ let ty::Closure(closure_def_id, closure_args) = *closure_ty.kind() else {
bug!("closure expr does not have closure type: {:?}", closure_ty);
};
@@ -144,7 +134,7 @@ impl<'tcx> Cx<'tcx> {
};
let env_region = ty::Region::new_late_bound(self.tcx, ty::INNERMOST, br);
let closure_env_ty =
- self.tcx.closure_env_ty(closure_def_id, closure_substs, env_region).unwrap();
+ self.tcx.closure_env_ty(closure_def_id, closure_args, env_region).unwrap();
let liberated_closure_env_ty = self.tcx.erase_late_bound_regions(
ty::Binder::bind_with_vars(closure_env_ty, bound_vars),
);
@@ -196,7 +186,7 @@ impl<'tcx> Cx<'tcx> {
self.tcx
.type_of(va_list_did)
- .subst(self.tcx, &[self.tcx.lifetimes.re_erased.into()])
+ .instantiate(self.tcx, &[self.tcx.lifetimes.re_erased.into()])
} else {
fn_sig.inputs()[index]
};
diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
index ef60f08bf..383e80851 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
@@ -135,10 +135,12 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for MatchVisitor<'a, '_, 'tcx> {
});
return;
}
- ExprKind::Match { scrutinee, box ref arms } => {
+ ExprKind::Match { scrutinee, scrutinee_hir_id, box ref arms } => {
let source = match ex.span.desugaring_kind() {
Some(DesugaringKind::ForLoop) => hir::MatchSource::ForLoopDesugar,
- Some(DesugaringKind::QuestionMark) => hir::MatchSource::TryDesugar,
+ Some(DesugaringKind::QuestionMark) => {
+ hir::MatchSource::TryDesugar(scrutinee_hir_id)
+ }
Some(DesugaringKind::Await) => hir::MatchSource::AwaitDesugar,
_ => hir::MatchSource::Normal,
};
@@ -277,7 +279,7 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
| hir::MatchSource::FormatArgs => report_arm_reachability(&cx, &report),
            // Unreachable patterns in try and await expressions occur when one of
            // the arms is an uninhabited type, which is OK.
- hir::MatchSource::AwaitDesugar | hir::MatchSource::TryDesugar => {}
+ hir::MatchSource::AwaitDesugar | hir::MatchSource::TryDesugar(_) => {}
}
// Check if the match is exhaustive.
@@ -501,12 +503,12 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
let witness_1_is_privately_uninhabited =
if cx.tcx.features().exhaustive_patterns
&& let Some(witness_1) = witnesses.get(0)
- && let ty::Adt(adt, substs) = witness_1.ty().kind()
+ && let ty::Adt(adt, args) = witness_1.ty().kind()
&& adt.is_enum()
&& let Constructor::Variant(variant_index) = witness_1.ctor()
{
let variant = adt.variant(*variant_index);
- let inhabited = variant.inhabited_predicate(cx.tcx, *adt).subst(cx.tcx, substs);
+ let inhabited = variant.inhabited_predicate(cx.tcx, *adt).instantiate(cx.tcx, args);
assert!(inhabited.apply(cx.tcx, cx.param_env, cx.module));
!inhabited.apply_ignore_module(cx.tcx, cx.param_env)
} else {
@@ -691,7 +693,7 @@ fn non_exhaustive_match<'p, 'tcx>(
err = create_e0004(
cx.tcx.sess,
sp,
- format!("non-exhaustive patterns: {} not covered", joined_patterns),
+ format!("non-exhaustive patterns: {joined_patterns} not covered"),
);
err.span_label(sp, pattern_not_covered_label(&witnesses, &joined_patterns));
patterns_len = witnesses.len();
@@ -721,15 +723,13 @@ fn non_exhaustive_match<'p, 'tcx>(
&& matches!(witnesses[0].ctor(), Constructor::NonExhaustive)
{
err.note(format!(
- "`{}` does not have a fixed maximum value, so a wildcard `_` is necessary to match \
+ "`{scrut_ty}` does not have a fixed maximum value, so a wildcard `_` is necessary to match \
exhaustively",
- scrut_ty,
));
if cx.tcx.sess.is_nightly_build() {
err.help(format!(
"add `#![feature(precise_pointer_size_matching)]` to the crate attributes to \
- enable precise `{}` matching",
- scrut_ty,
+ enable precise `{scrut_ty}` matching",
));
}
}
@@ -745,18 +745,13 @@ fn non_exhaustive_match<'p, 'tcx>(
[] if sp.eq_ctxt(expr_span) => {
// Get the span for the empty match body `{}`.
let (indentation, more) = if let Some(snippet) = sm.indentation_before(sp) {
- (format!("\n{}", snippet), " ")
+ (format!("\n{snippet}"), " ")
} else {
(" ".to_string(), "")
};
suggestion = Some((
sp.shrink_to_hi().with_hi(expr_span.hi()),
- format!(
- " {{{indentation}{more}{pattern} => todo!(),{indentation}}}",
- indentation = indentation,
- more = more,
- pattern = pattern,
- ),
+ format!(" {{{indentation}{more}{pattern} => todo!(),{indentation}}}",),
));
}
[only] => {
@@ -765,7 +760,7 @@ fn non_exhaustive_match<'p, 'tcx>(
&& let Ok(with_trailing) = sm.span_extend_while(only.span, |c| c.is_whitespace() || c == ',')
&& sm.is_multiline(with_trailing)
{
- (format!("\n{}", snippet), true)
+ (format!("\n{snippet}"), true)
} else {
(" ".to_string(), false)
};
@@ -780,7 +775,7 @@ fn non_exhaustive_match<'p, 'tcx>(
};
suggestion = Some((
only.span.shrink_to_hi(),
- format!("{}{}{} => todo!()", comma, pre_indentation, pattern),
+ format!("{comma}{pre_indentation}{pattern} => todo!()"),
));
}
[.., prev, last] => {
@@ -803,7 +798,7 @@ fn non_exhaustive_match<'p, 'tcx>(
if let Some(spacing) = spacing {
suggestion = Some((
last.span.shrink_to_hi(),
- format!("{}{}{} => todo!()", comma, spacing, pattern),
+ format!("{comma}{spacing}{pattern} => todo!()"),
));
}
}
@@ -900,7 +895,7 @@ fn adt_defined_here<'p, 'tcx>(
for pat in spans {
span.push_span_label(pat, "not covered");
}
- err.span_note(span, format!("`{}` defined here", ty));
+ err.span_note(span, format!("`{ty}` defined here"));
}
}
@@ -942,7 +937,9 @@ fn maybe_point_at_variant<'a, 'p: 'a, 'tcx: 'a>(
/// This analysis is *not* subsumed by NLL.
fn check_borrow_conflicts_in_at_patterns<'tcx>(cx: &MatchVisitor<'_, '_, 'tcx>, pat: &Pat<'tcx>) {
// Extract `sub` in `binding @ sub`.
- let PatKind::Binding { name, mode, ty, subpattern: Some(box ref sub), .. } = pat.kind else { return };
+ let PatKind::Binding { name, mode, ty, subpattern: Some(box ref sub), .. } = pat.kind else {
+ return;
+ };
let is_binding_by_move = |ty: Ty<'tcx>| !ty.is_copy_modulo_regions(cx.tcx, cx.param_env);
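The suggestion hunks above only switch to inlined format arguments; the string being assembled is the usual non-exhaustive-match fix-it. For orientation, a hypothetical before/after of what the `[only]`-arm branch produces on user code (`None` here stands in for the missing witness pattern):

```rust
// Before the suggestion (rejected with E0004, non-exhaustive patterns):
//
//     match value {
//         Some(x) => println!("{x}"),
//     }
//
// After applying the generated `{comma}{pre_indentation}{pattern} => todo!()`:
fn demo(value: Option<u32>) {
    match value {
        Some(x) => println!("{x}"),
        None => todo!(),
    }
}

fn main() {
    demo(Some(3));
}
```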
diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
index 050b01294..1376344cf 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
@@ -325,6 +325,11 @@ impl<'tcx> ConstToPat<'tcx> {
// `PartialEq::eq` on it.
return Err(FallbackToConstRef);
}
+ ty::FnDef(..) => {
+ self.saw_const_match_error.set(true);
+ tcx.sess.emit_err(InvalidPattern { span, non_sm_ty: ty });
+ PatKind::Wild
+ }
ty::Adt(adt_def, _) if !self.type_marked_structural(ty) => {
debug!("adt_def {:?} has !type_marked_structural for cv.ty: {:?}", adt_def, ty,);
self.saw_const_match_error.set(true);
@@ -332,20 +337,20 @@ impl<'tcx> ConstToPat<'tcx> {
tcx.sess.emit_err(err);
PatKind::Wild
}
- ty::Adt(adt_def, substs) if adt_def.is_enum() => {
+ ty::Adt(adt_def, args) if adt_def.is_enum() => {
let (&variant_index, fields) = cv.unwrap_branch().split_first().unwrap();
let variant_index =
VariantIdx::from_u32(variant_index.unwrap_leaf().try_to_u32().ok().unwrap());
PatKind::Variant {
adt_def: *adt_def,
- substs,
+ args,
variant_index,
subpatterns: self.field_pats(
fields.iter().copied().zip(
adt_def.variants()[variant_index]
.fields
.iter()
- .map(|field| field.ty(self.tcx(), substs)),
+ .map(|field| field.ty(self.tcx(), args)),
),
)?,
}
@@ -354,9 +359,9 @@ impl<'tcx> ConstToPat<'tcx> {
subpatterns: self
.field_pats(cv.unwrap_branch().iter().copied().zip(fields.iter()))?,
},
- ty::Adt(def, substs) => PatKind::Leaf {
+ ty::Adt(def, args) => PatKind::Leaf {
subpatterns: self.field_pats(cv.unwrap_branch().iter().copied().zip(
- def.non_enum_variant().fields.iter().map(|field| field.ty(self.tcx(), substs)),
+ def.non_enum_variant().fields.iter().map(|field| field.ty(self.tcx(), args)),
))?,
},
ty::Slice(elem_ty) => PatKind::Slice {
@@ -440,7 +445,7 @@ impl<'tcx> ConstToPat<'tcx> {
}
}
},
- ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::FnDef(..) => PatKind::Constant {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) => PatKind::Constant {
value: mir::ConstantKind::Ty(ty::Const::new_value(tcx, cv, ty)),
},
ty::FnPtr(..) | ty::RawPtr(..) => unreachable!(),
diff --git a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
index 9df6d2f43..bee1c4e46 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
@@ -306,9 +306,9 @@ impl fmt::Debug for IntRange {
let (lo, hi) = self.boundaries();
let bias = self.bias;
let (lo, hi) = (lo ^ bias, hi ^ bias);
- write!(f, "{}", lo)?;
+ write!(f, "{lo}")?;
write!(f, "{}", RangeEnd::Included)?;
- write!(f, "{}", hi)
+ write!(f, "{hi}")
}
}
@@ -922,7 +922,7 @@ impl<'tcx> SplitWildcard<'tcx> {
let kind = if cx.is_uninhabited(*sub_ty) { FixedLen(0) } else { VarLen(0, 0) };
smallvec![Slice(Slice::new(None, kind))]
}
- ty::Adt(def, substs) if def.is_enum() => {
+ ty::Adt(def, args) if def.is_enum() => {
// If the enum is declared as `#[non_exhaustive]`, we treat it as if it had an
// additional "unknown" constructor.
// There is no point in enumerating all possible variants, because the user can't
@@ -950,21 +950,19 @@ impl<'tcx> SplitWildcard<'tcx> {
let is_secretly_empty =
def.variants().is_empty() && !is_exhaustive_pat_feature && !pcx.is_top_level;
- let mut ctors: SmallVec<[_; 1]> = def
- .variants()
- .iter_enumerated()
- .filter(|(_, v)| {
- // If `exhaustive_patterns` is enabled, we exclude variants known to be
- // uninhabited.
- !is_exhaustive_pat_feature
- || v.inhabited_predicate(cx.tcx, *def).subst(cx.tcx, substs).apply(
- cx.tcx,
- cx.param_env,
- cx.module,
- )
- })
- .map(|(idx, _)| Variant(idx))
- .collect();
+ let mut ctors: SmallVec<[_; 1]> =
+ def.variants()
+ .iter_enumerated()
+ .filter(|(_, v)| {
+ // If `exhaustive_patterns` is enabled, we exclude variants known to be
+ // uninhabited.
+ !is_exhaustive_pat_feature
+ || v.inhabited_predicate(cx.tcx, *def)
+ .instantiate(cx.tcx, args)
+ .apply(cx.tcx, cx.param_env, cx.module)
+ })
+ .map(|(idx, _)| Variant(idx))
+ .collect();
if is_secretly_empty || is_declared_nonexhaustive {
ctors.push(NonExhaustive);
@@ -1156,12 +1154,12 @@ impl<'p, 'tcx> Fields<'p, 'tcx> {
ty: Ty<'tcx>,
variant: &'a VariantDef,
) -> impl Iterator<Item = (FieldIdx, Ty<'tcx>)> + Captures<'a> + Captures<'p> {
- let ty::Adt(adt, substs) = ty.kind() else { bug!() };
+ let ty::Adt(adt, args) = ty.kind() else { bug!() };
// Whether we must not match the fields of this variant exhaustively.
let is_non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did().is_local();
variant.fields.iter().enumerate().filter_map(move |(i, field)| {
- let ty = field.ty(cx.tcx, substs);
+ let ty = field.ty(cx.tcx, args);
// `field.ty()` doesn't normalize after substituting.
let ty = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
let is_visible = adt.is_enum() || field.vis.is_accessible_from(cx.module, cx.tcx);
@@ -1183,11 +1181,11 @@ impl<'p, 'tcx> Fields<'p, 'tcx> {
Single | Variant(_) => match pcx.ty.kind() {
ty::Tuple(fs) => Fields::wildcards_from_tys(pcx.cx, fs.iter(), pcx.span),
ty::Ref(_, rty, _) => Fields::wildcards_from_tys(pcx.cx, once(*rty), pcx.span),
- ty::Adt(adt, substs) => {
+ ty::Adt(adt, args) => {
if adt.is_box() {
// The only legal patterns of type `Box` (outside `std`) are `_` and box
// patterns. If we're here we can assume this is a box pattern.
- Fields::wildcards_from_tys(pcx.cx, once(substs.type_at(0)), pcx.span)
+ Fields::wildcards_from_tys(pcx.cx, once(args.type_at(0)), pcx.span)
} else {
let variant = &adt.variant(constructor.variant_index_for_adt(*adt));
let tys = Fields::list_variant_nonhidden_fields(pcx.cx, pcx.ty, variant)
@@ -1294,7 +1292,7 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
}
fields = Fields::from_iter(cx, wilds);
}
- ty::Adt(adt, substs) if adt.is_box() => {
+ ty::Adt(adt, args) if adt.is_box() => {
// The only legal patterns of type `Box` (outside `std`) are `_` and box
// patterns. If we're here we can assume this is a box pattern.
// FIXME(Nadrieril): A `Box` can in theory be matched either with `Box(_,
@@ -1311,7 +1309,7 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
let pat = if let Some(pat) = pattern {
mkpat(&pat.pattern)
} else {
- DeconstructedPat::wildcard(substs.type_at(0), pat.span)
+ DeconstructedPat::wildcard(args.type_at(0), pat.span)
};
ctor = Single;
fields = Fields::singleton(cx, pat);
@@ -1437,7 +1435,7 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
// the pattern is a box pattern.
PatKind::Deref { subpattern: subpatterns.next().unwrap() }
}
- ty::Adt(adt_def, substs) => {
+ ty::Adt(adt_def, args) => {
let variant_index = self.ctor.variant_index_for_adt(*adt_def);
let variant = &adt_def.variant(variant_index);
let subpatterns = Fields::list_variant_nonhidden_fields(cx, self.ty, variant)
@@ -1446,7 +1444,7 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
.collect();
if adt_def.is_enum() {
- PatKind::Variant { adt_def: *adt_def, substs, variant_index, subpatterns }
+ PatKind::Variant { adt_def: *adt_def, args, variant_index, subpatterns }
} else {
PatKind::Leaf { subpatterns }
}
@@ -1621,7 +1619,7 @@ impl<'p, 'tcx> fmt::Debug for DeconstructedPat<'p, 'tcx> {
// of `std`). So this branch is only reachable when the feature is enabled and
// the pattern is a box pattern.
let subpattern = self.iter_fields().next().unwrap();
- write!(f, "box {:?}", subpattern)
+ write!(f, "box {subpattern:?}")
}
ty::Adt(..) | ty::Tuple(..) => {
let variant = match self.ty.kind() {
@@ -1640,7 +1638,7 @@ impl<'p, 'tcx> fmt::Debug for DeconstructedPat<'p, 'tcx> {
write!(f, "(")?;
for p in self.iter_fields() {
write!(f, "{}", start_or_comma())?;
- write!(f, "{:?}", p)?;
+ write!(f, "{p:?}")?;
}
write!(f, ")")
}
@@ -1676,11 +1674,11 @@ impl<'p, 'tcx> fmt::Debug for DeconstructedPat<'p, 'tcx> {
write!(f, "]")
}
&FloatRange(lo, hi, end) => {
- write!(f, "{}", lo)?;
- write!(f, "{}", end)?;
- write!(f, "{}", hi)
+ write!(f, "{lo}")?;
+ write!(f, "{end}")?;
+ write!(f, "{hi}")
}
- IntRange(range) => write!(f, "{:?}", range), // Best-effort, will render e.g. `false` as `0..=0`
+ IntRange(range) => write!(f, "{range:?}"), // Best-effort, will render e.g. `false` as `0..=0`
Wildcard | Missing { .. } | NonExhaustive => write!(f, "_ : {:?}", self.ty),
Or => {
for pat in self.iter_fields() {
@@ -1688,7 +1686,7 @@ impl<'p, 'tcx> fmt::Debug for DeconstructedPat<'p, 'tcx> {
}
Ok(())
}
- Str(value) => write!(f, "{}", value),
+ Str(value) => write!(f, "{value}"),
Opaque => write!(f, "<constant pattern>"),
}
}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
index 600995927..c08fe54c3 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
@@ -23,10 +23,10 @@ use rustc_middle::mir::interpret::{
use rustc_middle::mir::{self, ConstantKind, UserTypeProjection};
use rustc_middle::mir::{BorrowKind, Mutability};
use rustc_middle::thir::{Ascription, BindingMode, FieldPat, LocalVarId, Pat, PatKind, PatRange};
-use rustc_middle::ty::subst::{GenericArg, SubstsRef};
use rustc_middle::ty::CanonicalUserTypeAnnotation;
use rustc_middle::ty::TypeVisitableExt;
use rustc_middle::ty::{self, AdtDef, Region, Ty, TyCtxt, UserType};
+use rustc_middle::ty::{GenericArg, GenericArgsRef};
use rustc_span::{Span, Symbol};
use rustc_target::abi::FieldIdx;
@@ -416,8 +416,8 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
let enum_id = self.tcx.parent(variant_id);
let adt_def = self.tcx.adt_def(enum_id);
if adt_def.is_enum() {
- let substs = match ty.kind() {
- ty::Adt(_, substs) | ty::FnDef(_, substs) => substs,
+ let args = match ty.kind() {
+ ty::Adt(_, args) | ty::FnDef(_, args) => args,
ty::Error(_) => {
// Avoid ICE (#50585)
return PatKind::Wild;
@@ -426,7 +426,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
};
PatKind::Variant {
adt_def,
- substs,
+ args,
variant_index: adt_def.variant_index_with_id(variant_id),
subpatterns,
}
@@ -439,7 +439,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
DefKind::Struct
| DefKind::Ctor(CtorOf::Struct, ..)
| DefKind::Union
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::AssocTy,
_,
)
@@ -460,7 +460,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
}
};
- if let Some(user_ty) = self.user_substs_applied_to_ty_of_hir_id(hir_id) {
+ if let Some(user_ty) = self.user_args_applied_to_ty_of_hir_id(hir_id) {
debug!("lower_variant_or_leaf: kind={:?} user_ty={:?} span={:?}", kind, user_ty, span);
let annotation = CanonicalUserTypeAnnotation {
user_ty: Box::new(user_ty),
@@ -496,13 +496,13 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
// Use `Reveal::All` here because patterns are always monomorphic even if their function
// isn't.
let param_env_reveal_all = self.param_env.with_reveal_all_normalized(self.tcx);
- // N.B. There is no guarantee that substs collected in typeck results are fully normalized,
+ // N.B. There is no guarantee that args collected in typeck results are fully normalized,
// so they need to be normalized in order to pass to `Instance::resolve`, which will ICE
// if given unnormalized types.
- let substs = self
+ let args = self
.tcx
- .normalize_erasing_regions(param_env_reveal_all, self.typeck_results.node_substs(id));
- let instance = match ty::Instance::resolve(self.tcx, param_env_reveal_all, def_id, substs) {
+ .normalize_erasing_regions(param_env_reveal_all, self.typeck_results.node_args(id));
+ let instance = match ty::Instance::resolve(self.tcx, param_env_reveal_all, def_id, args) {
Ok(Some(i)) => i,
Ok(None) => {
// It should be assoc consts if there's no error but we cannot resolve it.
@@ -617,16 +617,14 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
}
let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id());
- let parent_substs =
- tcx.erase_regions(ty::InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
- let substs =
- ty::InlineConstSubsts::new(tcx, ty::InlineConstSubstsParts { parent_substs, ty })
- .substs;
+ let parent_args =
+ tcx.erase_regions(ty::GenericArgs::identity_for_item(tcx, typeck_root_def_id));
+ let args = ty::InlineConstArgs::new(tcx, ty::InlineConstArgsParts { parent_args, ty }).args;
- let uneval = mir::UnevaluatedConst { def: def_id.to_def_id(), substs, promoted: None };
- debug_assert!(!substs.has_free_regions());
+ let uneval = mir::UnevaluatedConst { def: def_id.to_def_id(), args, promoted: None };
+ debug_assert!(!args.has_free_regions());
- let ct = ty::UnevaluatedConst { def: def_id.to_def_id(), substs: substs };
+ let ct = ty::UnevaluatedConst { def: def_id.to_def_id(), args: args };
// First try using a valtree in order to destructure the constant into a pattern.
if let Ok(Some(valtree)) =
self.tcx.const_eval_resolve_for_typeck(self.param_env, ct, Some(span))
@@ -754,7 +752,7 @@ macro_rules! ClonePatternFoldableImpls {
ClonePatternFoldableImpls! { <'tcx>
Span, FieldIdx, Mutability, Symbol, LocalVarId, usize,
Region<'tcx>, Ty<'tcx>, BindingMode, AdtDef<'tcx>,
- SubstsRef<'tcx>, &'tcx GenericArg<'tcx>, UserType<'tcx>,
+ GenericArgsRef<'tcx>, &'tcx GenericArg<'tcx>, UserType<'tcx>,
UserTypeProjection, CanonicalUserTypeAnnotation<'tcx>
}
@@ -804,10 +802,10 @@ impl<'tcx> PatternFoldable<'tcx> for PatKind<'tcx> {
is_primary,
}
}
- PatKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
+ PatKind::Variant { adt_def, args, variant_index, ref subpatterns } => {
PatKind::Variant {
adt_def: adt_def.fold_with(folder),
- substs: substs.fold_with(folder),
+ args: args.fold_with(folder),
variant_index,
subpatterns: subpatterns.fold_with(folder),
}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
index e5b635069..08cfe98bb 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
@@ -459,7 +459,7 @@ impl<'p, 'tcx> fmt::Debug for PatStack<'p, 'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "+")?;
for pat in self.iter() {
- write!(f, " {:?} +", pat)?;
+ write!(f, " {pat:?} +")?;
}
Ok(())
}
@@ -530,7 +530,7 @@ impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> {
let Matrix { patterns: m, .. } = self;
let pretty_printed_matrix: Vec<Vec<String>> =
- m.iter().map(|row| row.iter().map(|pat| format!("{:?}", pat)).collect()).collect();
+ m.iter().map(|row| row.iter().map(|pat| format!("{pat:?}")).collect()).collect();
let column_count = m.iter().map(|row| row.len()).next().unwrap_or(0);
assert!(m.iter().all(|row| row.len() == column_count));
diff --git a/compiler/rustc_mir_build/src/thir/print.rs b/compiler/rustc_mir_build/src/thir/print.rs
index 8d7c624a8..3b6276cfe 100644
--- a/compiler/rustc_mir_build/src/thir/print.rs
+++ b/compiler/rustc_mir_build/src/thir/print.rs
@@ -321,7 +321,7 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> {
print_indented!(self, format!("pat: {:?}", pat), depth_lvl + 1);
print_indented!(self, "}", depth_lvl);
}
- Match { scrutinee, arms } => {
+ Match { scrutinee, arms, .. } => {
print_indented!(self, "Match {", depth_lvl);
print_indented!(self, "scrutinee:", depth_lvl + 1);
self.print_expr(*scrutinee, depth_lvl + 2);
@@ -427,10 +427,10 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> {
self.print_expr(*value, depth_lvl + 2);
print_indented!(self, "}", depth_lvl);
}
- ConstBlock { did, substs } => {
+ ConstBlock { did, args } => {
print_indented!(self, "ConstBlock {", depth_lvl);
print_indented!(self, format!("did: {:?}", did), depth_lvl + 1);
- print_indented!(self, format!("substs: {:?}", substs), depth_lvl + 1);
+ print_indented!(self, format!("args: {:?}", args), depth_lvl + 1);
print_indented!(self, "}", depth_lvl);
}
Repeat { value, count } => {
@@ -499,11 +499,11 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> {
ZstLiteral { user_ty } => {
print_indented!(self, format!("ZstLiteral(user_ty: {:?})", user_ty), depth_lvl);
}
- NamedConst { def_id, substs, user_ty } => {
+ NamedConst { def_id, args, user_ty } => {
print_indented!(self, "NamedConst {", depth_lvl);
print_indented!(self, format!("def_id: {:?}", def_id), depth_lvl + 1);
print_indented!(self, format!("user_ty: {:?}", user_ty), depth_lvl + 1);
- print_indented!(self, format!("substs: {:?}", substs), depth_lvl + 1);
+ print_indented!(self, format!("args: {:?}", args), depth_lvl + 1);
print_indented!(self, "}", depth_lvl);
}
ConstParam { param, def_id } => {
@@ -560,7 +560,7 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> {
format!("variant_index: {:?}", adt_expr.variant_index),
depth_lvl + 1
);
- print_indented!(self, format!("substs: {:?}", adt_expr.substs), depth_lvl + 1);
+ print_indented!(self, format!("args: {:?}", adt_expr.args), depth_lvl + 1);
print_indented!(self, format!("user_ty: {:?}", adt_expr.user_ty), depth_lvl + 1);
for (i, field_expr) in adt_expr.fields.iter().enumerate() {
@@ -662,11 +662,11 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> {
print_indented!(self, "}", depth_lvl + 1);
}
- PatKind::Variant { adt_def, substs, variant_index, subpatterns } => {
+ PatKind::Variant { adt_def, args, variant_index, subpatterns } => {
print_indented!(self, "Variant {", depth_lvl + 1);
print_indented!(self, "adt_def: ", depth_lvl + 2);
self.print_adt_def(*adt_def, depth_lvl + 3);
- print_indented!(self, format!("substs: {:?}", substs), depth_lvl + 2);
+ print_indented!(self, format!("args: {:?}", args), depth_lvl + 2);
print_indented!(self, format!("variant_index: {:?}", variant_index), depth_lvl + 2);
if subpatterns.len() > 0 {
@@ -784,11 +784,11 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> {
}
fn print_closure_expr(&mut self, expr: &ClosureExpr<'tcx>, depth_lvl: usize) {
- let ClosureExpr { closure_id, substs, upvars, movability, fake_reads } = expr;
+ let ClosureExpr { closure_id, args, upvars, movability, fake_reads } = expr;
print_indented!(self, "ClosureExpr {", depth_lvl);
print_indented!(self, format!("closure_id: {:?}", closure_id), depth_lvl + 1);
- print_indented!(self, format!("substs: {:?}", substs), depth_lvl + 1);
+ print_indented!(self, format!("args: {:?}", args), depth_lvl + 1);
if upvars.len() > 0 {
print_indented!(self, "upvars: [", depth_lvl + 1);
diff --git a/compiler/rustc_mir_build/src/thir/util.rs b/compiler/rustc_mir_build/src/thir/util.rs
index c58ed1ac0..9106b4d33 100644
--- a/compiler/rustc_mir_build/src/thir/util.rs
+++ b/compiler/rustc_mir_build/src/thir/util.rs
@@ -9,7 +9,7 @@ pub(crate) trait UserAnnotatedTyHelpers<'tcx> {
/// Looks up the type associated with this hir-id and applies the
/// user-given substitutions; the hir-id must map to a suitable
/// type.
- fn user_substs_applied_to_ty_of_hir_id(
+ fn user_args_applied_to_ty_of_hir_id(
&self,
hir_id: hir::HirId,
) -> Option<CanonicalUserType<'tcx>> {
diff --git a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
index 0540a5e94..9e02b0271 100644
--- a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
@@ -4,8 +4,8 @@ use rustc_index::Idx;
use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::*;
use rustc_middle::traits::Reveal;
-use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT};
use std::{fmt, iter};
@@ -263,7 +263,7 @@ where
base_place: Place<'tcx>,
variant_path: D::Path,
variant: &'tcx ty::VariantDef,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Vec<(Place<'tcx>, Option<D::Path>)> {
variant
.fields
@@ -276,7 +276,7 @@ where
assert_eq!(self.elaborator.param_env().reveal(), Reveal::All);
let field_ty =
- tcx.normalize_erasing_regions(self.elaborator.param_env(), f.ty(tcx, substs));
+ tcx.normalize_erasing_regions(self.elaborator.param_env(), f.ty(tcx, args));
(tcx.mk_place_field(base_place, field, field_ty), subpath)
})
@@ -414,16 +414,16 @@ where
fn open_drop_for_box_contents(
&mut self,
adt: ty::AdtDef<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
succ: BasicBlock,
unwind: Unwind,
) -> BasicBlock {
// drop glue is sent straight to codegen
// box cannot be directly dereferenced
- let unique_ty = adt.non_enum_variant().fields[FieldIdx::new(0)].ty(self.tcx(), substs);
+ let unique_ty = adt.non_enum_variant().fields[FieldIdx::new(0)].ty(self.tcx(), args);
let unique_variant = unique_ty.ty_adt_def().unwrap().non_enum_variant();
- let nonnull_ty = unique_variant.fields[FieldIdx::from_u32(0)].ty(self.tcx(), substs);
- let ptr_ty = Ty::new_imm_ptr(self.tcx(), substs[0].expect_ty());
+ let nonnull_ty = unique_variant.fields[FieldIdx::from_u32(0)].ty(self.tcx(), args);
+ let ptr_ty = Ty::new_imm_ptr(self.tcx(), args[0].expect_ty());
let unique_place = self.tcx().mk_place_field(self.place, FieldIdx::new(0), unique_ty);
let nonnull_place = self.tcx().mk_place_field(unique_place, FieldIdx::new(0), nonnull_ty);
@@ -436,7 +436,11 @@ where
}
#[instrument(level = "debug", ret)]
- fn open_drop_for_adt(&mut self, adt: ty::AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> BasicBlock {
+ fn open_drop_for_adt(
+ &mut self,
+ adt: ty::AdtDef<'tcx>,
+ args: GenericArgsRef<'tcx>,
+ ) -> BasicBlock {
if adt.variants().is_empty() {
return self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
@@ -453,7 +457,7 @@ where
let contents_drop = if skip_contents {
(self.succ, self.unwind)
} else {
- self.open_drop_for_adt_contents(adt, substs)
+ self.open_drop_for_adt_contents(adt, args)
};
if adt.is_box() {
@@ -463,7 +467,7 @@ where
.1
.map(|unwind| self.destructor_call_block((unwind, Unwind::InCleanup)));
- self.open_drop_for_box_contents(adt, substs, succ, unwind)
+ self.open_drop_for_box_contents(adt, args, succ, unwind)
} else if adt.has_dtor(self.tcx()) {
self.destructor_call_block(contents_drop)
} else {
@@ -474,7 +478,7 @@ where
fn open_drop_for_adt_contents(
&mut self,
adt: ty::AdtDef<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> (BasicBlock, Unwind) {
let (succ, unwind) = self.drop_ladder_bottom();
if !adt.is_enum() {
@@ -482,18 +486,18 @@ where
self.place,
self.path,
&adt.variant(FIRST_VARIANT),
- substs,
+ args,
);
self.drop_ladder(fields, succ, unwind)
} else {
- self.open_drop_for_multivariant(adt, substs, succ, unwind)
+ self.open_drop_for_multivariant(adt, args, succ, unwind)
}
}
fn open_drop_for_multivariant(
&mut self,
adt: ty::AdtDef<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
succ: BasicBlock,
unwind: Unwind,
) -> (BasicBlock, Unwind) {
@@ -515,7 +519,7 @@ where
self.place,
ProjectionElem::Downcast(Some(variant.name), variant_index),
);
- let fields = self.move_paths_for_fields(base_place, variant_path, &variant, substs);
+ let fields = self.move_paths_for_fields(base_place, variant_path, &variant, args);
values.push(discr.val);
if let Unwind::To(unwind) = unwind {
// We can't use the half-ladder from the original
@@ -550,7 +554,7 @@ where
let have_field_with_drop_glue = variant
.fields
.iter()
- .any(|field| field.ty(tcx, substs).needs_drop(tcx, param_env));
+ .any(|field| field.ty(tcx, args).needs_drop(tcx, param_env));
if have_field_with_drop_glue {
have_otherwise_with_drop_glue = true;
}
@@ -856,22 +860,16 @@ where
fn open_drop(&mut self) -> BasicBlock {
let ty = self.place_ty(self.place);
match ty.kind() {
- ty::Closure(_, substs) => {
- let tys: Vec<_> = substs.as_closure().upvar_tys().collect();
- self.open_drop_for_tuple(&tys)
- }
+ ty::Closure(_, args) => self.open_drop_for_tuple(&args.as_closure().upvar_tys()),
// Note that `elaborate_drops` only drops the upvars of a generator,
// and this is ok because `open_drop` here can only be reached
// within that own generator's resume function.
// This should only happen for the self argument on the resume function.
// It effectively only contains upvars until the generator transformation runs.
// See librustc_body/transform/generator.rs for more details.
- ty::Generator(_, substs, _) => {
- let tys: Vec<_> = substs.as_generator().upvar_tys().collect();
- self.open_drop_for_tuple(&tys)
- }
+ ty::Generator(_, args, _) => self.open_drop_for_tuple(&args.as_generator().upvar_tys()),
ty::Tuple(fields) => self.open_drop_for_tuple(fields),
- ty::Adt(def, substs) => self.open_drop_for_adt(*def, substs),
+ ty::Adt(def, args) => self.open_drop_for_adt(*def, args),
ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind),
ty::Array(ety, size) => {
let size = size.try_eval_target_usize(self.tcx(), self.elaborator.param_env());
diff --git a/compiler/rustc_mir_dataflow/src/framework/direction.rs b/compiler/rustc_mir_dataflow/src/framework/direction.rs
index 804b44a6b..8a9e37c5a 100644
--- a/compiler/rustc_mir_dataflow/src/framework/direction.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/direction.rs
@@ -1,11 +1,10 @@
-use rustc_middle::mir::{self, BasicBlock, Location, SwitchTargets, UnwindAction};
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::mir::{
+ self, BasicBlock, CallReturnPlaces, Location, SwitchTargets, TerminatorEdges, UnwindAction,
+};
use std::ops::RangeInclusive;
use super::visitor::{ResultsVisitable, ResultsVisitor};
-use super::{
- Analysis, CallReturnPlaces, Effect, EffectIndex, GenKillAnalysis, GenKillSet, SwitchIntTarget,
-};
+use super::{Analysis, Effect, EffectIndex, GenKillAnalysis, GenKillSet, SwitchIntTarget};
pub trait Direction {
const IS_FORWARD: bool;
@@ -24,15 +23,17 @@ pub trait Direction {
) where
A: Analysis<'tcx>;
- fn apply_effects_in_block<'tcx, A>(
+ fn apply_effects_in_block<'mir, 'tcx, A>(
analysis: &mut A,
state: &mut A::Domain,
block: BasicBlock,
- block_data: &mir::BasicBlockData<'tcx>,
- ) where
+ block_data: &'mir mir::BasicBlockData<'tcx>,
+ statement_effect: Option<&dyn Fn(BasicBlock, &mut A::Domain)>,
+ ) -> TerminatorEdges<'mir, 'tcx>
+ where
A: Analysis<'tcx>;
- fn gen_kill_effects_in_block<'tcx, A>(
+ fn gen_kill_statement_effects_in_block<'tcx, A>(
analysis: &mut A,
trans: &mut GenKillSet<A::Idx>,
block: BasicBlock,
@@ -51,10 +52,10 @@ pub trait Direction {
fn join_state_into_successors_of<'tcx, A>(
analysis: &mut A,
- tcx: TyCtxt<'tcx>,
body: &mir::Body<'tcx>,
exit_state: &mut A::Domain,
- block: (BasicBlock, &'_ mir::BasicBlockData<'tcx>),
+ block: BasicBlock,
+ edges: TerminatorEdges<'_, 'tcx>,
propagate: impl FnMut(BasicBlock, &A::Domain),
) where
A: Analysis<'tcx>;
@@ -66,27 +67,33 @@ pub struct Backward;
impl Direction for Backward {
const IS_FORWARD: bool = false;
- fn apply_effects_in_block<'tcx, A>(
+ fn apply_effects_in_block<'mir, 'tcx, A>(
analysis: &mut A,
state: &mut A::Domain,
block: BasicBlock,
- block_data: &mir::BasicBlockData<'tcx>,
- ) where
+ block_data: &'mir mir::BasicBlockData<'tcx>,
+ statement_effect: Option<&dyn Fn(BasicBlock, &mut A::Domain)>,
+ ) -> TerminatorEdges<'mir, 'tcx>
+ where
A: Analysis<'tcx>,
{
let terminator = block_data.terminator();
let location = Location { block, statement_index: block_data.statements.len() };
analysis.apply_before_terminator_effect(state, terminator, location);
- analysis.apply_terminator_effect(state, terminator, location);
-
- for (statement_index, statement) in block_data.statements.iter().enumerate().rev() {
- let location = Location { block, statement_index };
- analysis.apply_before_statement_effect(state, statement, location);
- analysis.apply_statement_effect(state, statement, location);
+ let edges = analysis.apply_terminator_effect(state, terminator, location);
+ if let Some(statement_effect) = statement_effect {
+ statement_effect(block, state)
+ } else {
+ for (statement_index, statement) in block_data.statements.iter().enumerate().rev() {
+ let location = Location { block, statement_index };
+ analysis.apply_before_statement_effect(state, statement, location);
+ analysis.apply_statement_effect(state, statement, location);
+ }
}
+ edges
}
- fn gen_kill_effects_in_block<'tcx, A>(
+ fn gen_kill_statement_effects_in_block<'tcx, A>(
analysis: &mut A,
trans: &mut GenKillSet<A::Idx>,
block: BasicBlock,
@@ -94,11 +101,6 @@ impl Direction for Backward {
) where
A: GenKillAnalysis<'tcx>,
{
- let terminator = block_data.terminator();
- let location = Location { block, statement_index: block_data.statements.len() };
- analysis.before_terminator_effect(trans, terminator, location);
- analysis.terminator_effect(trans, terminator, location);
-
for (statement_index, statement) in block_data.statements.iter().enumerate().rev() {
let location = Location { block, statement_index };
analysis.before_statement_effect(trans, statement, location);
@@ -217,10 +219,10 @@ impl Direction for Backward {
fn join_state_into_successors_of<'tcx, A>(
analysis: &mut A,
- _tcx: TyCtxt<'tcx>,
body: &mir::Body<'tcx>,
exit_state: &mut A::Domain,
- (bb, _bb_data): (BasicBlock, &'_ mir::BasicBlockData<'tcx>),
+ bb: BasicBlock,
+ _edges: TerminatorEdges<'_, 'tcx>,
mut propagate: impl FnMut(BasicBlock, &A::Domain),
) where
A: Analysis<'tcx>,
@@ -254,7 +256,11 @@ impl Direction for Backward {
mir::TerminatorKind::Yield { resume, resume_arg, .. } if resume == bb => {
let mut tmp = exit_state.clone();
- analysis.apply_yield_resume_effect(&mut tmp, resume, resume_arg);
+ analysis.apply_call_return_effect(
+ &mut tmp,
+ resume,
+ CallReturnPlaces::Yield(resume_arg),
+ );
propagate(pred, &tmp);
}
@@ -318,27 +324,33 @@ pub struct Forward;
impl Direction for Forward {
const IS_FORWARD: bool = true;
- fn apply_effects_in_block<'tcx, A>(
+ fn apply_effects_in_block<'mir, 'tcx, A>(
analysis: &mut A,
state: &mut A::Domain,
block: BasicBlock,
- block_data: &mir::BasicBlockData<'tcx>,
- ) where
+ block_data: &'mir mir::BasicBlockData<'tcx>,
+ statement_effect: Option<&dyn Fn(BasicBlock, &mut A::Domain)>,
+ ) -> TerminatorEdges<'mir, 'tcx>
+ where
A: Analysis<'tcx>,
{
- for (statement_index, statement) in block_data.statements.iter().enumerate() {
- let location = Location { block, statement_index };
- analysis.apply_before_statement_effect(state, statement, location);
- analysis.apply_statement_effect(state, statement, location);
+ if let Some(statement_effect) = statement_effect {
+ statement_effect(block, state)
+ } else {
+ for (statement_index, statement) in block_data.statements.iter().enumerate() {
+ let location = Location { block, statement_index };
+ analysis.apply_before_statement_effect(state, statement, location);
+ analysis.apply_statement_effect(state, statement, location);
+ }
}
let terminator = block_data.terminator();
let location = Location { block, statement_index: block_data.statements.len() };
analysis.apply_before_terminator_effect(state, terminator, location);
- analysis.apply_terminator_effect(state, terminator, location);
+ analysis.apply_terminator_effect(state, terminator, location)
}
- fn gen_kill_effects_in_block<'tcx, A>(
+ fn gen_kill_statement_effects_in_block<'tcx, A>(
analysis: &mut A,
trans: &mut GenKillSet<A::Idx>,
block: BasicBlock,
@@ -351,11 +363,6 @@ impl Direction for Forward {
analysis.before_statement_effect(trans, statement, location);
analysis.statement_effect(trans, statement, location);
}
-
- let terminator = block_data.terminator();
- let location = Location { block, statement_index: block_data.statements.len() };
- analysis.before_terminator_effect(trans, terminator, location);
- analysis.terminator_effect(trans, terminator, location);
}
fn apply_effects_in_range<'tcx, A>(
@@ -464,86 +471,32 @@ impl Direction for Forward {
fn join_state_into_successors_of<'tcx, A>(
analysis: &mut A,
- _tcx: TyCtxt<'tcx>,
_body: &mir::Body<'tcx>,
exit_state: &mut A::Domain,
- (bb, bb_data): (BasicBlock, &'_ mir::BasicBlockData<'tcx>),
+ bb: BasicBlock,
+ edges: TerminatorEdges<'_, 'tcx>,
mut propagate: impl FnMut(BasicBlock, &A::Domain),
) where
A: Analysis<'tcx>,
{
- use mir::TerminatorKind::*;
- match bb_data.terminator().kind {
- Return | Resume | Terminate | GeneratorDrop | Unreachable => {}
-
- Goto { target } => propagate(target, exit_state),
-
- Assert { target, unwind, expected: _, msg: _, cond: _ }
- | Drop { target, unwind, place: _, replace: _ }
- | FalseUnwind { real_target: target, unwind } => {
- if let UnwindAction::Cleanup(unwind) = unwind {
- propagate(unwind, exit_state);
- }
-
+ match edges {
+ TerminatorEdges::None => {}
+ TerminatorEdges::Single(target) => propagate(target, exit_state),
+ TerminatorEdges::Double(target, unwind) => {
propagate(target, exit_state);
+ propagate(unwind, exit_state);
}
-
- FalseEdge { real_target, imaginary_target } => {
- propagate(real_target, exit_state);
- propagate(imaginary_target, exit_state);
- }
-
- Yield { resume: target, drop, resume_arg, value: _ } => {
- if let Some(drop) = drop {
- propagate(drop, exit_state);
- }
-
- analysis.apply_yield_resume_effect(exit_state, target, resume_arg);
- propagate(target, exit_state);
- }
-
- Call { unwind, destination, target, func: _, args: _, call_source: _, fn_span: _ } => {
- if let UnwindAction::Cleanup(unwind) = unwind {
- propagate(unwind, exit_state);
- }
-
- if let Some(target) = target {
- // N.B.: This must be done *last*, otherwise the unwind path will see the call
- // return effect.
- analysis.apply_call_return_effect(
- exit_state,
- bb,
- CallReturnPlaces::Call(destination),
- );
- propagate(target, exit_state);
- }
- }
-
- InlineAsm {
- template: _,
- ref operands,
- options: _,
- line_spans: _,
- destination,
- unwind,
- } => {
+ TerminatorEdges::AssignOnReturn { return_, unwind, place } => {
+ // This must be done *first*, otherwise the unwind path will see the assignments.
if let UnwindAction::Cleanup(unwind) = unwind {
propagate(unwind, exit_state);
}
-
- if let Some(target) = destination {
- // N.B.: This must be done *last*, otherwise the unwind path will see the call
- // return effect.
- analysis.apply_call_return_effect(
- exit_state,
- bb,
- CallReturnPlaces::InlineAsm(operands),
- );
- propagate(target, exit_state);
+ if let Some(return_) = return_ {
+ analysis.apply_call_return_effect(exit_state, bb, place);
+ propagate(return_, exit_state);
}
}
-
- SwitchInt { ref targets, ref discr } => {
+ TerminatorEdges::SwitchInt { targets, discr } => {
let mut applier = ForwardSwitchIntEdgeEffectsApplier {
exit_state,
targets,
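The `AssignOnReturn` arm above propagates along the unwind edge before applying the call-return effect, so the cleanup path never observes the newly assigned return place. A minimal standalone sketch of that ordering, using toy types in place of the real `TerminatorEdges`, `CallReturnPlaces`, and dataflow domain (all names and indices below are illustrative, not the rustc API):

// Toy stand-ins: the real TerminatorEdges and CallReturnPlaces live in rustc_middle::mir.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum Edges {
    None,
    Single(usize),
    AssignOnReturn { return_: Option<usize>, unwind: Option<usize>, place: usize },
}

// Toy domain: `state[p]` records whether place `p` is initialized.
fn join_into_successors(
    edges: Edges,
    exit_state: &mut Vec<bool>,
    mut propagate: impl FnMut(usize, &Vec<bool>),
) {
    match edges {
        Edges::None => {}
        Edges::Single(target) => propagate(target, exit_state),
        Edges::AssignOnReturn { return_, unwind, place } => {
            // Unwind first: the cleanup path must not see the call-return assignment.
            if let Some(unwind) = unwind {
                propagate(unwind, exit_state);
            }
            if let Some(return_) = return_ {
                exit_state[place] = true; // the call-return effect
                propagate(return_, exit_state);
            }
        }
    }
}

fn main() {
    let mut entry_sets = vec![vec![false; 2]; 4];
    let mut exit = vec![false, false];
    exit[0] = true; // place 0 already initialized before the call
    let edges = Edges::AssignOnReturn { return_: Some(2), unwind: Some(3), place: 1 };
    join_into_successors(edges, &mut exit, |bb, state| entry_sets[bb] = state.clone());
    assert_eq!(entry_sets[3], vec![true, false]); // unwind successor: place 1 still uninit
    assert_eq!(entry_sets[2], vec![true, true]); // return successor: place 1 initialized
}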
diff --git a/compiler/rustc_mir_dataflow/src/framework/engine.rs b/compiler/rustc_mir_dataflow/src/framework/engine.rs
index c755d7588..a29962d77 100644
--- a/compiler/rustc_mir_dataflow/src/framework/engine.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/engine.rs
@@ -144,7 +144,7 @@ where
// gen/kill problems on cyclic CFGs. This is not ideal, but it doesn't seem to degrade
// performance in practice. I've tried a few ways to avoid this, but they have downsides. See
// the message for the commit that added this FIXME for more information.
- apply_trans_for_block: Option<Box<dyn Fn(BasicBlock, &mut A::Domain)>>,
+ apply_statement_trans_for_block: Option<Box<dyn Fn(BasicBlock, &mut A::Domain)>>,
}
impl<'a, 'tcx, A, D, T> Engine<'a, 'tcx, A>
@@ -165,12 +165,17 @@ where
// Otherwise, compute and store the cumulative transfer function for each block.
- let identity = GenKillSet::identity(analysis.bottom_value(body).domain_size());
+ let identity = GenKillSet::identity(analysis.domain_size(body));
let mut trans_for_block = IndexVec::from_elem(identity, &body.basic_blocks);
for (block, block_data) in body.basic_blocks.iter_enumerated() {
let trans = &mut trans_for_block[block];
- A::Direction::gen_kill_effects_in_block(&mut analysis, trans, block, block_data);
+ A::Direction::gen_kill_statement_effects_in_block(
+ &mut analysis,
+ trans,
+ block,
+ block_data,
+ );
}
let apply_trans = Box::new(move |bb: BasicBlock, state: &mut A::Domain| {
@@ -199,17 +204,18 @@ where
tcx: TyCtxt<'tcx>,
body: &'a mir::Body<'tcx>,
analysis: A,
- apply_trans_for_block: Option<Box<dyn Fn(BasicBlock, &mut A::Domain)>>,
+ apply_statement_trans_for_block: Option<Box<dyn Fn(BasicBlock, &mut A::Domain)>>,
) -> Self {
- let bottom_value = analysis.bottom_value(body);
- let mut entry_sets = IndexVec::from_elem(bottom_value.clone(), &body.basic_blocks);
+ let mut entry_sets =
+ IndexVec::from_fn_n(|_| analysis.bottom_value(body), body.basic_blocks.len());
analysis.initialize_start_block(body, &mut entry_sets[mir::START_BLOCK]);
- if A::Direction::IS_BACKWARD && entry_sets[mir::START_BLOCK] != bottom_value {
+ if A::Direction::IS_BACKWARD && entry_sets[mir::START_BLOCK] != analysis.bottom_value(body)
+ {
bug!("`initialize_start_block` is not yet supported for backward dataflow analyses");
}
- Engine { analysis, tcx, body, pass_name: None, entry_sets, apply_trans_for_block }
+ Engine { analysis, tcx, body, pass_name: None, entry_sets, apply_statement_trans_for_block }
}
/// Adds an identifier to the graphviz output for this particular run of a dataflow analysis.
@@ -231,7 +237,7 @@ where
body,
mut entry_sets,
tcx,
- apply_trans_for_block,
+ apply_statement_trans_for_block,
pass_name,
..
} = self;
@@ -263,19 +269,20 @@ where
state.clone_from(&entry_sets[bb]);
// Apply the block transfer function, using the cached one if it exists.
- match &apply_trans_for_block {
- Some(apply) => apply(bb, &mut state),
- None => {
- A::Direction::apply_effects_in_block(&mut analysis, &mut state, bb, bb_data)
- }
- }
+ let edges = A::Direction::apply_effects_in_block(
+ &mut analysis,
+ &mut state,
+ bb,
+ bb_data,
+ apply_statement_trans_for_block.as_deref(),
+ );
A::Direction::join_state_into_successors_of(
&mut analysis,
- tcx,
body,
&mut state,
- (bb, bb_data),
+ bb,
+ edges,
|target: BasicBlock, state: &A::Domain| {
let set_changed = entry_sets[target].join(state);
if set_changed {
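A rough standalone sketch of why the cached per-block transfer function now covers statements only: under the new signatures the terminator effect is always applied directly, because it also has to report the outgoing `TerminatorEdges` (and, for some analyses, it depends on the current state). Toy types only, none of this is the rustc API:

// Toy gen/kill set, cached per block for the statement effects only.
struct StatementTrans {
    gens: Vec<usize>,
    kills: Vec<usize>,
}

impl StatementTrans {
    fn apply(&self, state: &mut Vec<bool>) {
        for &k in &self.kills {
            state[k] = false;
        }
        for &g in &self.gens {
            state[g] = true;
        }
    }
}

// Toy terminator effect: updates the state *and* reports the successor to propagate to.
fn terminator_effect(state: &mut Vec<bool>, assigned_place: usize, succ: usize) -> usize {
    state[assigned_place] = true;
    succ
}

fn main() {
    let cached = StatementTrans { gens: vec![0], kills: vec![1] };
    let mut state = vec![false, true, false];
    cached.apply(&mut state); // statements: replayable from the per-block cache
    let edge = terminator_effect(&mut state, 2, 7); // terminator: applied uncached every time
    assert_eq!(state, vec![true, false, true]);
    assert_eq!(edge, 7); // the engine then joins `state` into that successor's entry set
}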
diff --git a/compiler/rustc_mir_dataflow/src/framework/fmt.rs b/compiler/rustc_mir_dataflow/src/framework/fmt.rs
index 6a256fae3..e3a66bd95 100644
--- a/compiler/rustc_mir_dataflow/src/framework/fmt.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/fmt.rs
@@ -1,6 +1,7 @@
//! Custom formatting traits used when outputting Graphviz diagrams with the results of a dataflow
//! analysis.
+use super::lattice::MaybeReachable;
use rustc_index::bit_set::{BitSet, ChunkedBitSet, HybridBitSet};
use rustc_index::Idx;
use std::fmt;
@@ -124,6 +125,37 @@ where
}
}
+impl<S, C> DebugWithContext<C> for MaybeReachable<S>
+where
+ S: DebugWithContext<C>,
+{
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ MaybeReachable::Unreachable => {
+ write!(f, "unreachable")
+ }
+ MaybeReachable::Reachable(set) => set.fmt_with(ctxt, f),
+ }
+ }
+
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match (self, old) {
+ (MaybeReachable::Unreachable, MaybeReachable::Unreachable) => Ok(()),
+ (MaybeReachable::Unreachable, MaybeReachable::Reachable(set)) => {
+ write!(f, "\u{001f}+")?;
+ set.fmt_with(ctxt, f)
+ }
+ (MaybeReachable::Reachable(set), MaybeReachable::Unreachable) => {
+ write!(f, "\u{001f}-")?;
+ set.fmt_with(ctxt, f)
+ }
+ (MaybeReachable::Reachable(this), MaybeReachable::Reachable(old)) => {
+ this.fmt_diff_with(old, ctxt, f)
+ }
+ }
+ }
+}
+
fn fmt_diff<T, C>(
inserted: &HybridBitSet<T>,
removed: &HybridBitSet<T>,
diff --git a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
index e331533c3..1421d9b45 100644
--- a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
@@ -269,7 +269,11 @@ where
self.write_row(w, "", "(on yield resume)", |this, w, fmt| {
let state_on_generator_drop = this.results.get().clone();
this.results.apply_custom_effect(|analysis, state| {
- analysis.apply_yield_resume_effect(state, resume, resume_arg);
+ analysis.apply_call_return_effect(
+ state,
+ resume,
+ CallReturnPlaces::Yield(resume_arg),
+ );
});
write!(
diff --git a/compiler/rustc_mir_dataflow/src/framework/lattice.rs b/compiler/rustc_mir_dataflow/src/framework/lattice.rs
index 3952f44ad..3b89598d2 100644
--- a/compiler/rustc_mir_dataflow/src/framework/lattice.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/lattice.rs
@@ -187,10 +187,6 @@ impl<T: Idx> MeetSemiLattice for ChunkedBitSet<T> {
pub struct Dual<T>(pub T);
impl<T: Idx> BitSetExt<T> for Dual<BitSet<T>> {
- fn domain_size(&self) -> usize {
- self.0.domain_size()
- }
-
fn contains(&self, elem: T) -> bool {
self.0.contains(elem)
}
@@ -276,3 +272,93 @@ impl<T> HasBottom for FlatSet<T> {
impl<T> HasTop for FlatSet<T> {
const TOP: Self = Self::Top;
}
+
+/// Extend a lattice with a bottom value to represent an unreachable execution.
+///
+/// The only useful action on an unreachable state is joining it with a reachable one to make it
+/// reachable. All other actions, gen/kill for instance, are no-ops.
+#[derive(PartialEq, Eq, Debug)]
+pub enum MaybeReachable<T> {
+ Unreachable,
+ Reachable(T),
+}
+
+impl<T> MaybeReachable<T> {
+ pub fn is_reachable(&self) -> bool {
+ matches!(self, MaybeReachable::Reachable(_))
+ }
+}
+
+impl<T> HasBottom for MaybeReachable<T> {
+ const BOTTOM: Self = MaybeReachable::Unreachable;
+}
+
+impl<T: HasTop> HasTop for MaybeReachable<T> {
+ const TOP: Self = MaybeReachable::Reachable(T::TOP);
+}
+
+impl<S> MaybeReachable<S> {
+ /// Return whether the current state contains the given element. If the state is unreachable,
+ /// it does not contain anything.

+ pub fn contains<T>(&self, elem: T) -> bool
+ where
+ S: BitSetExt<T>,
+ {
+ match self {
+ MaybeReachable::Unreachable => false,
+ MaybeReachable::Reachable(set) => set.contains(elem),
+ }
+ }
+}
+
+impl<T, S: BitSetExt<T>> BitSetExt<T> for MaybeReachable<S> {
+ fn contains(&self, elem: T) -> bool {
+ self.contains(elem)
+ }
+
+ fn union(&mut self, other: &HybridBitSet<T>) {
+ match self {
+ MaybeReachable::Unreachable => {}
+ MaybeReachable::Reachable(set) => set.union(other),
+ }
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) {
+ match self {
+ MaybeReachable::Unreachable => {}
+ MaybeReachable::Reachable(set) => set.subtract(other),
+ }
+ }
+}
+
+impl<V: Clone> Clone for MaybeReachable<V> {
+ fn clone(&self) -> Self {
+ match self {
+ MaybeReachable::Reachable(x) => MaybeReachable::Reachable(x.clone()),
+ MaybeReachable::Unreachable => MaybeReachable::Unreachable,
+ }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ match (&mut *self, source) {
+ (MaybeReachable::Reachable(x), MaybeReachable::Reachable(y)) => {
+ x.clone_from(&y);
+ }
+ _ => *self = source.clone(),
+ }
+ }
+}
+
+impl<T: JoinSemiLattice + Clone> JoinSemiLattice for MaybeReachable<T> {
+ fn join(&mut self, other: &Self) -> bool {
+ // Unreachable acts as a bottom.
+ match (&mut *self, &other) {
+ (_, MaybeReachable::Unreachable) => false,
+ (MaybeReachable::Unreachable, _) => {
+ *self = other.clone();
+ true
+ }
+ (MaybeReachable::Reachable(this), MaybeReachable::Reachable(other)) => this.join(other),
+ }
+ }
+}
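To make the lattice's behaviour concrete, here is a self-contained sketch of the join semantics documented above, with a plain `Vec<usize>` standing in for the wrapped bitset domain (illustrative only, not the rustc types):

#[derive(Clone, Debug, PartialEq)]
enum MaybeReachable<T> {
    Unreachable,
    Reachable(T),
}

impl MaybeReachable<Vec<usize>> {
    // Mirrors `JoinSemiLattice::join`: returns true when `self` changed.
    fn join(&mut self, other: &Self) -> bool {
        match (&mut *self, other) {
            // Joining with an unreachable state is a no-op.
            (_, MaybeReachable::Unreachable) => false,
            // An unreachable state adopts whatever the reachable side holds.
            (MaybeReachable::Unreachable, _) => {
                *self = other.clone();
                true
            }
            // Both reachable: union the underlying sets.
            (MaybeReachable::Reachable(this), MaybeReachable::Reachable(that)) => {
                let before = this.len();
                for &x in that {
                    if !this.contains(&x) {
                        this.push(x);
                    }
                }
                this.len() != before
            }
        }
    }
}

fn main() {
    let mut state = MaybeReachable::Unreachable;
    assert!(state.join(&MaybeReachable::Reachable(vec![0, 2]))); // becomes reachable
    assert!(state.join(&MaybeReachable::Reachable(vec![2, 3]))); // gains element 3
    assert!(!state.join(&MaybeReachable::Unreachable)); // joining bottom: no change
    assert_eq!(state, MaybeReachable::Reachable(vec![0, 2, 3]));
}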
diff --git a/compiler/rustc_mir_dataflow/src/framework/mod.rs b/compiler/rustc_mir_dataflow/src/framework/mod.rs
index 58df9b9a7..ce30c642f 100644
--- a/compiler/rustc_mir_dataflow/src/framework/mod.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/mod.rs
@@ -34,7 +34,7 @@ use std::cmp::Ordering;
use rustc_index::bit_set::{BitSet, ChunkedBitSet, HybridBitSet};
use rustc_index::Idx;
-use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::mir::{self, BasicBlock, CallReturnPlaces, Location, TerminatorEdges};
use rustc_middle::ty::TyCtxt;
mod cursor;
@@ -48,23 +48,18 @@ mod visitor;
pub use self::cursor::{AnalysisResults, ResultsClonedCursor, ResultsCursor, ResultsRefCursor};
pub use self::direction::{Backward, Direction, Forward};
pub use self::engine::{Engine, EntrySets, Results, ResultsCloned};
-pub use self::lattice::{JoinSemiLattice, MeetSemiLattice};
+pub use self::lattice::{JoinSemiLattice, MaybeReachable, MeetSemiLattice};
pub use self::visitor::{visit_results, ResultsVisitable, ResultsVisitor};
/// Analysis domains are all bitsets of various kinds. This trait holds
/// operations needed by all of them.
pub trait BitSetExt<T> {
- fn domain_size(&self) -> usize;
fn contains(&self, elem: T) -> bool;
fn union(&mut self, other: &HybridBitSet<T>);
fn subtract(&mut self, other: &HybridBitSet<T>);
}
impl<T: Idx> BitSetExt<T> for BitSet<T> {
- fn domain_size(&self) -> usize {
- self.domain_size()
- }
-
fn contains(&self, elem: T) -> bool {
self.contains(elem)
}
@@ -79,10 +74,6 @@ impl<T: Idx> BitSetExt<T> for BitSet<T> {
}
impl<T: Idx> BitSetExt<T> for ChunkedBitSet<T> {
- fn domain_size(&self) -> usize {
- self.domain_size()
- }
-
fn contains(&self, elem: T) -> bool {
self.contains(elem)
}
@@ -172,12 +163,12 @@ pub trait Analysis<'tcx>: AnalysisDomain<'tcx> {
/// in this function. That should go in `apply_call_return_effect`. For example, in the
/// `InitializedPlaces` analyses, the return place for a function call is not marked as
/// initialized here.
- fn apply_terminator_effect(
+ fn apply_terminator_effect<'mir>(
&mut self,
state: &mut Self::Domain,
- terminator: &mir::Terminator<'tcx>,
+ terminator: &'mir mir::Terminator<'tcx>,
location: Location,
- );
+ ) -> TerminatorEdges<'mir, 'tcx>;
/// Updates the current dataflow state with an effect that occurs immediately *before* the
/// given terminator.
@@ -207,20 +198,6 @@ pub trait Analysis<'tcx>: AnalysisDomain<'tcx> {
return_places: CallReturnPlaces<'_, 'tcx>,
);
- /// Updates the current dataflow state with the effect of resuming from a `Yield` terminator.
- ///
- /// This is similar to `apply_call_return_effect` in that it only takes place after the
- /// generator is resumed, not when it is dropped.
- ///
- /// By default, no effects happen.
- fn apply_yield_resume_effect(
- &mut self,
- _state: &mut Self::Domain,
- _resume_block: BasicBlock,
- _resume_place: mir::Place<'tcx>,
- ) {
- }
-
/// Updates the current dataflow state with the effect of taking a particular branch in a
/// `SwitchInt` terminator.
///
@@ -295,6 +272,8 @@ where
pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> {
type Idx: Idx;
+ fn domain_size(&self, body: &mir::Body<'tcx>) -> usize;
+
/// See `Analysis::apply_statement_effect`.
fn statement_effect(
&mut self,
@@ -313,12 +292,12 @@ pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> {
}
/// See `Analysis::apply_terminator_effect`.
- fn terminator_effect(
+ fn terminator_effect<'mir>(
&mut self,
- trans: &mut impl GenKill<Self::Idx>,
- terminator: &mir::Terminator<'tcx>,
+ trans: &mut Self::Domain,
+ terminator: &'mir mir::Terminator<'tcx>,
location: Location,
- );
+ ) -> TerminatorEdges<'mir, 'tcx>;
/// See `Analysis::apply_before_terminator_effect`.
fn before_terminator_effect(
@@ -339,15 +318,6 @@ pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> {
return_places: CallReturnPlaces<'_, 'tcx>,
);
- /// See `Analysis::apply_yield_resume_effect`.
- fn yield_resume_effect(
- &mut self,
- _trans: &mut impl GenKill<Self::Idx>,
- _resume_block: BasicBlock,
- _resume_place: mir::Place<'tcx>,
- ) {
- }
-
/// See `Analysis::apply_switch_int_edge_effects`.
fn switch_int_edge_effects<G: GenKill<Self::Idx>>(
&mut self,
@@ -381,13 +351,13 @@ where
self.before_statement_effect(state, statement, location);
}
- fn apply_terminator_effect(
+ fn apply_terminator_effect<'mir>(
&mut self,
state: &mut A::Domain,
- terminator: &mir::Terminator<'tcx>,
+ terminator: &'mir mir::Terminator<'tcx>,
location: Location,
- ) {
- self.terminator_effect(state, terminator, location);
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ self.terminator_effect(state, terminator, location)
}
fn apply_before_terminator_effect(
@@ -410,15 +380,6 @@ where
self.call_return_effect(state, block, return_places);
}
- fn apply_yield_resume_effect(
- &mut self,
- state: &mut A::Domain,
- resume_block: BasicBlock,
- resume_place: mir::Place<'tcx>,
- ) {
- self.yield_resume_effect(state, resume_block, resume_place);
- }
-
fn apply_switch_int_edge_effects(
&mut self,
block: BasicBlock,
@@ -531,6 +492,24 @@ impl<T: Idx> GenKill<T> for ChunkedBitSet<T> {
}
}
+impl<T, S: GenKill<T>> GenKill<T> for MaybeReachable<S> {
+ fn gen(&mut self, elem: T) {
+ match self {
+ // If the state is not reachable, adding an element does nothing.
+ MaybeReachable::Unreachable => {}
+ MaybeReachable::Reachable(set) => set.gen(elem),
+ }
+ }
+
+ fn kill(&mut self, elem: T) {
+ match self {
+ // If the state is not reachable, killing an element does nothing.
+ MaybeReachable::Unreachable => {}
+ MaybeReachable::Reachable(set) => set.kill(elem),
+ }
+ }
+}
+
impl<T: Idx> GenKill<T> for lattice::Dual<BitSet<T>> {
fn gen(&mut self, elem: T) {
self.0.insert(elem);
@@ -612,29 +591,5 @@ pub trait SwitchIntEdgeEffects<D> {
fn apply(&mut self, apply_edge_effect: impl FnMut(&mut D, SwitchIntTarget));
}
-/// List of places that are written to after a successful (non-unwind) return
-/// from a `Call` or `InlineAsm`.
-pub enum CallReturnPlaces<'a, 'tcx> {
- Call(mir::Place<'tcx>),
- InlineAsm(&'a [mir::InlineAsmOperand<'tcx>]),
-}
-
-impl<'tcx> CallReturnPlaces<'_, 'tcx> {
- pub fn for_each(&self, mut f: impl FnMut(mir::Place<'tcx>)) {
- match *self {
- Self::Call(place) => f(place),
- Self::InlineAsm(operands) => {
- for op in operands {
- match *op {
- mir::InlineAsmOperand::Out { place: Some(place), .. }
- | mir::InlineAsmOperand::InOut { out_place: Some(place), .. } => f(place),
- _ => {}
- }
- }
- }
- }
- }
-}
-
#[cfg(test)]
mod tests;
diff --git a/compiler/rustc_mir_dataflow/src/framework/tests.rs b/compiler/rustc_mir_dataflow/src/framework/tests.rs
index cb0ec144e..9cce5b26c 100644
--- a/compiler/rustc_mir_dataflow/src/framework/tests.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/tests.rs
@@ -198,14 +198,15 @@ impl<'tcx, D: Direction> Analysis<'tcx> for MockAnalysis<'tcx, D> {
assert!(state.insert(idx));
}
- fn apply_terminator_effect(
+ fn apply_terminator_effect<'mir>(
&mut self,
state: &mut Self::Domain,
- _terminator: &mir::Terminator<'tcx>,
+ terminator: &'mir mir::Terminator<'tcx>,
location: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
let idx = self.effect(Effect::Primary.at_index(location.statement_index));
assert!(state.insert(idx));
+ terminator.edges()
}
fn apply_before_terminator_effect(
diff --git a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
index b88ed32b6..8d7b50796 100644
--- a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
@@ -1,9 +1,9 @@
-use super::*;
-
-use crate::{AnalysisDomain, CallReturnPlaces, GenKill, GenKillAnalysis};
+use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
+use crate::{AnalysisDomain, GenKill, GenKillAnalysis};
+
/// A dataflow analysis that tracks whether a pointer or reference could possibly exist that points
/// to a given local.
///
@@ -14,7 +14,7 @@ use rustc_middle::mir::*;
pub struct MaybeBorrowedLocals;
impl MaybeBorrowedLocals {
- fn transfer_function<'a, T>(&'a self, trans: &'a mut T) -> TransferFunction<'a, T> {
+ pub(super) fn transfer_function<'a, T>(&'a self, trans: &'a mut T) -> TransferFunction<'a, T> {
TransferFunction { trans }
}
}
@@ -23,12 +23,12 @@ impl<'tcx> AnalysisDomain<'tcx> for MaybeBorrowedLocals {
type Domain = BitSet<Local>;
const NAME: &'static str = "maybe_borrowed_locals";
- fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ fn bottom_value(&self, body: &Body<'tcx>) -> Self::Domain {
// bottom = unborrowed
BitSet::new_empty(body.local_decls().len())
}
- fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
+ fn initialize_start_block(&self, _: &Body<'tcx>, _: &mut Self::Domain) {
// No locals are aliased on function entry
}
}
@@ -36,35 +36,40 @@ impl<'tcx> AnalysisDomain<'tcx> for MaybeBorrowedLocals {
impl<'tcx> GenKillAnalysis<'tcx> for MaybeBorrowedLocals {
type Idx = Local;
+ fn domain_size(&self, body: &Body<'tcx>) -> usize {
+ body.local_decls.len()
+ }
+
fn statement_effect(
&mut self,
trans: &mut impl GenKill<Self::Idx>,
- statement: &mir::Statement<'tcx>,
+ statement: &Statement<'tcx>,
location: Location,
) {
self.transfer_function(trans).visit_statement(statement, location);
}
- fn terminator_effect(
+ fn terminator_effect<'mir>(
&mut self,
- trans: &mut impl GenKill<Self::Idx>,
- terminator: &mir::Terminator<'tcx>,
+ trans: &mut Self::Domain,
+ terminator: &'mir Terminator<'tcx>,
location: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
self.transfer_function(trans).visit_terminator(terminator, location);
+ terminator.edges()
}
fn call_return_effect(
&mut self,
_trans: &mut impl GenKill<Self::Idx>,
- _block: mir::BasicBlock,
+ _block: BasicBlock,
_return_places: CallReturnPlaces<'_, 'tcx>,
) {
}
}
/// A `Visitor` that defines the transfer function for `MaybeBorrowedLocals`.
-struct TransferFunction<'a, T> {
+pub(super) struct TransferFunction<'a, T> {
trans: &'a mut T,
}
@@ -82,37 +87,37 @@ where
}
}
- fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
self.super_rvalue(rvalue, location);
match rvalue {
- mir::Rvalue::AddressOf(_, borrowed_place) | mir::Rvalue::Ref(_, _, borrowed_place) => {
+ Rvalue::AddressOf(_, borrowed_place) | Rvalue::Ref(_, _, borrowed_place) => {
if !borrowed_place.is_indirect() {
self.trans.gen(borrowed_place.local);
}
}
- mir::Rvalue::Cast(..)
- | mir::Rvalue::ShallowInitBox(..)
- | mir::Rvalue::Use(..)
- | mir::Rvalue::ThreadLocalRef(..)
- | mir::Rvalue::Repeat(..)
- | mir::Rvalue::Len(..)
- | mir::Rvalue::BinaryOp(..)
- | mir::Rvalue::CheckedBinaryOp(..)
- | mir::Rvalue::NullaryOp(..)
- | mir::Rvalue::UnaryOp(..)
- | mir::Rvalue::Discriminant(..)
- | mir::Rvalue::Aggregate(..)
- | mir::Rvalue::CopyForDeref(..) => {}
+ Rvalue::Cast(..)
+ | Rvalue::ShallowInitBox(..)
+ | Rvalue::Use(..)
+ | Rvalue::ThreadLocalRef(..)
+ | Rvalue::Repeat(..)
+ | Rvalue::Len(..)
+ | Rvalue::BinaryOp(..)
+ | Rvalue::CheckedBinaryOp(..)
+ | Rvalue::NullaryOp(..)
+ | Rvalue::UnaryOp(..)
+ | Rvalue::Discriminant(..)
+ | Rvalue::Aggregate(..)
+ | Rvalue::CopyForDeref(..) => {}
}
}
- fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
self.super_terminator(terminator, location);
match terminator.kind {
- mir::TerminatorKind::Drop { place: dropped_place, .. } => {
+ TerminatorKind::Drop { place: dropped_place, .. } => {
// Drop terminators may call custom drop glue (`Drop::drop`), which takes `&mut
// self` as a parameter. In the general case, a drop impl could launder that
// reference into the surrounding environment through a raw pointer, thus creating
diff --git a/compiler/rustc_mir_dataflow/src/impls/initialized.rs b/compiler/rustc_mir_dataflow/src/impls/initialized.rs
new file mode 100644
index 000000000..e6d383d62
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/impls/initialized.rs
@@ -0,0 +1,778 @@
+use rustc_index::bit_set::{BitSet, ChunkedBitSet};
+use rustc_index::Idx;
+use rustc_middle::mir::{self, Body, CallReturnPlaces, Location, TerminatorEdges};
+use rustc_middle::ty::{self, TyCtxt};
+
+use crate::drop_flag_effects_for_function_entry;
+use crate::drop_flag_effects_for_location;
+use crate::elaborate_drops::DropFlagState;
+use crate::framework::SwitchIntEdgeEffects;
+use crate::move_paths::{HasMoveData, InitIndex, InitKind, LookupResult, MoveData, MovePathIndex};
+use crate::on_lookup_result_bits;
+use crate::MoveDataParamEnv;
+use crate::{drop_flag_effects, on_all_children_bits, on_all_drop_children_bits};
+use crate::{lattice, AnalysisDomain, GenKill, GenKillAnalysis, MaybeReachable};
+
+/// `MaybeInitializedPlaces` tracks all places that might be
+/// initialized upon reaching a particular point in the control flow
+/// for a function.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) { // maybe-init:
+/// // {}
+/// let a = S; let mut b = S; let c; let d; // {a, b}
+///
+/// if pred {
+/// drop(a); // { b}
+/// b = S; // { b}
+///
+/// } else {
+/// drop(b); // {a}
+/// d = S; // {a, d}
+///
+/// } // {a, b, d}
+///
+/// c = S; // {a, b, c, d}
+/// }
+/// ```
+///
+/// To determine whether a place *must* be initialized at a
+/// particular control-flow point, one can take the set-difference
+/// between this data and the data from `MaybeUninitializedPlaces` at the
+/// corresponding control-flow point.
+///
+/// Similarly, at a given `drop` statement, the set-intersection
+/// between this data and `MaybeUninitializedPlaces` yields the set of
+/// places that would require a dynamic drop-flag at that statement.
+pub struct MaybeInitializedPlaces<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ mdpe: &'a MoveDataParamEnv<'tcx>,
+ skip_unreachable_unwind: bool,
+}
+
+impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+ MaybeInitializedPlaces { tcx, body, mdpe, skip_unreachable_unwind: false }
+ }
+
+ pub fn skipping_unreachable_unwind(mut self) -> Self {
+ self.skip_unreachable_unwind = true;
+ self
+ }
+
+ pub fn is_unwind_dead(
+ &self,
+ place: mir::Place<'tcx>,
+ state: &MaybeReachable<ChunkedBitSet<MovePathIndex>>,
+ ) -> bool {
+ if let LookupResult::Exact(path) = self.move_data().rev_lookup.find(place.as_ref()) {
+ let mut maybe_live = false;
+ on_all_drop_children_bits(self.tcx, self.body, self.mdpe, path, |child| {
+ maybe_live |= state.contains(child);
+ });
+ !maybe_live
+ } else {
+ false
+ }
+ }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for MaybeInitializedPlaces<'a, 'tcx> {
+ fn move_data(&self) -> &MoveData<'tcx> {
+ &self.mdpe.move_data
+ }
+}
+
+/// `MaybeUninitializedPlaces` tracks all places that might be
+/// uninitialized upon reaching a particular point in the control flow
+/// for a function.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) { // maybe-uninit:
+/// // {a, b, c, d}
+/// let a = S; let mut b = S; let c; let d; // { c, d}
+///
+/// if pred {
+/// drop(a); // {a, c, d}
+/// b = S; // {a, c, d}
+///
+/// } else {
+/// drop(b); // { b, c, d}
+/// d = S; // { b, c }
+///
+/// } // {a, b, c, d}
+///
+/// c = S; // {a, b, d}
+/// }
+/// ```
+///
+/// To determine whether a place *must* be uninitialized at a
+/// particular control-flow point, one can take the set-difference
+/// between this data and the data from `MaybeInitializedPlaces` at the
+/// corresponding control-flow point.
+///
+/// Similarly, at a given `drop` statement, the set-intersection
+/// between this data and `MaybeInitializedPlaces` yields the set of
+/// places that would require a dynamic drop-flag at that statement.
+pub struct MaybeUninitializedPlaces<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ mdpe: &'a MoveDataParamEnv<'tcx>,
+
+ mark_inactive_variants_as_uninit: bool,
+ skip_unreachable_unwind: BitSet<mir::BasicBlock>,
+}
+
+impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+ MaybeUninitializedPlaces {
+ tcx,
+ body,
+ mdpe,
+ mark_inactive_variants_as_uninit: false,
+ skip_unreachable_unwind: BitSet::new_empty(body.basic_blocks.len()),
+ }
+ }
+
+ /// Causes inactive enum variants to be marked as "maybe uninitialized" after a switch on an
+ /// enum discriminant.
+ ///
+ /// This is correct in a vacuum but is not the default because it causes problems in the borrow
+ /// checker, where this information gets propagated along `FakeEdge`s.
+ pub fn mark_inactive_variants_as_uninit(mut self) -> Self {
+ self.mark_inactive_variants_as_uninit = true;
+ self
+ }
+
+ pub fn skipping_unreachable_unwind(
+ mut self,
+ unreachable_unwind: BitSet<mir::BasicBlock>,
+ ) -> Self {
+ self.skip_unreachable_unwind = unreachable_unwind;
+ self
+ }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for MaybeUninitializedPlaces<'a, 'tcx> {
+ fn move_data(&self) -> &MoveData<'tcx> {
+ &self.mdpe.move_data
+ }
+}
+
+/// `DefinitelyInitializedPlaces` tracks all places that are definitely
+/// initialized upon reaching a particular point in the control flow
+/// for a function.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) { // definite-init:
+/// // { }
+/// let a = S; let mut b = S; let c; let d; // {a, b }
+///
+/// if pred {
+/// drop(a); // { b, }
+/// b = S; // { b, }
+///
+/// } else {
+/// drop(b); // {a, }
+/// d = S; // {a, d}
+///
+/// } // { }
+///
+/// c = S; // { c }
+/// }
+/// ```
+///
+/// To determine whether a place *may* be uninitialized at a
+/// particular control-flow point, one can take the set-complement
+/// of this data.
+///
+/// Similarly, at a given `drop` statement, the set-difference between
+/// this data and `MaybeInitializedPlaces` yields the set of places
+/// that would require a dynamic drop-flag at that statement.
+pub struct DefinitelyInitializedPlaces<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ mdpe: &'a MoveDataParamEnv<'tcx>,
+}
+
+impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+ DefinitelyInitializedPlaces { tcx, body, mdpe }
+ }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> {
+ fn move_data(&self) -> &MoveData<'tcx> {
+ &self.mdpe.move_data
+ }
+}
+
+/// `EverInitializedPlaces` tracks all places that might have ever been
+/// initialized upon reaching a particular point in the control flow
+/// for a function, without an intervening `StorageDead`.
+///
+/// This dataflow is used to determine if an immutable local variable may
+/// be assigned to.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) { // ever-init:
+/// // { }
+/// let a = S; let mut b = S; let c; let d; // {a, b }
+///
+/// if pred {
+/// drop(a); // {a, b, }
+/// b = S; // {a, b, }
+///
+/// } else {
+/// drop(b); // {a, b, }
+/// d = S; // {a, b, d }
+///
+/// } // {a, b, d }
+///
+/// c = S; // {a, b, c, d }
+/// }
+/// ```
+pub struct EverInitializedPlaces<'a, 'tcx> {
+ #[allow(dead_code)]
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ mdpe: &'a MoveDataParamEnv<'tcx>,
+}
+
+impl<'a, 'tcx> EverInitializedPlaces<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+ EverInitializedPlaces { tcx, body, mdpe }
+ }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for EverInitializedPlaces<'a, 'tcx> {
+ fn move_data(&self) -> &MoveData<'tcx> {
+ &self.mdpe.move_data
+ }
+}
+
+impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
+ fn update_bits(
+ trans: &mut impl GenKill<MovePathIndex>,
+ path: MovePathIndex,
+ state: DropFlagState,
+ ) {
+ match state {
+ DropFlagState::Absent => trans.kill(path),
+ DropFlagState::Present => trans.gen(path),
+ }
+ }
+}
+
+impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
+ fn update_bits(
+ trans: &mut impl GenKill<MovePathIndex>,
+ path: MovePathIndex,
+ state: DropFlagState,
+ ) {
+ match state {
+ DropFlagState::Absent => trans.gen(path),
+ DropFlagState::Present => trans.kill(path),
+ }
+ }
+}
+
+impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
+ fn update_bits(
+ trans: &mut impl GenKill<MovePathIndex>,
+ path: MovePathIndex,
+ state: DropFlagState,
+ ) {
+ match state {
+ DropFlagState::Absent => trans.kill(path),
+ DropFlagState::Present => trans.gen(path),
+ }
+ }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
+ type Domain = MaybeReachable<ChunkedBitSet<MovePathIndex>>;
+ const NAME: &'static str = "maybe_init";
+
+ fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = uninitialized
+ MaybeReachable::Unreachable
+ }
+
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ *state =
+ MaybeReachable::Reachable(ChunkedBitSet::new_empty(self.move_data().move_paths.len()));
+ drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
+ assert!(s == DropFlagState::Present);
+ state.gen(path);
+ });
+ }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
+ type Idx = MovePathIndex;
+
+ fn domain_size(&self, _: &Body<'tcx>) -> usize {
+ self.move_data().move_paths.len()
+ }
+
+ fn statement_effect(
+ &mut self,
+ trans: &mut impl GenKill<Self::Idx>,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ });
+
+ // Mark all places as "maybe init" if they are mutably borrowed. See #90752.
+ if self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration
+ && let Some((_, rvalue)) = statement.kind.as_assign()
+ && let mir::Rvalue::Ref(_, mir::BorrowKind::Mut { .. }, place)
+ // FIXME: Does `&raw const foo` allow mutation? See #90413.
+ | mir::Rvalue::AddressOf(_, place) = rvalue
+ && let LookupResult::Exact(mpi) = self.move_data().rev_lookup.find(place.as_ref())
+ {
+ on_all_children_bits(self.tcx, self.body, self.move_data(), mpi, |child| {
+ trans.gen(child);
+ })
+ }
+ }
+
+ fn terminator_effect<'mir>(
+ &mut self,
+ state: &mut Self::Domain,
+ terminator: &'mir mir::Terminator<'tcx>,
+ location: Location,
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ let mut edges = terminator.edges();
+ if self.skip_unreachable_unwind
+ && let mir::TerminatorKind::Drop { target, unwind, place, replace: _ } = terminator.kind
+ && matches!(unwind, mir::UnwindAction::Cleanup(_))
+ && self.is_unwind_dead(place, state)
+ {
+ edges = TerminatorEdges::Single(target);
+ }
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(state, path, s)
+ });
+ edges
+ }
+
+ fn call_return_effect(
+ &mut self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _block: mir::BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+            // When a call returns successfully, the destination place is
+            // initialized, so set its bits to 1.
+ on_lookup_result_bits(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ self.move_data().rev_lookup.find(place.as_ref()),
+ |mpi| {
+ trans.gen(mpi);
+ },
+ );
+ });
+ }
+
+ fn switch_int_edge_effects<G: GenKill<Self::Idx>>(
+ &mut self,
+ block: mir::BasicBlock,
+ discr: &mir::Operand<'tcx>,
+ edge_effects: &mut impl SwitchIntEdgeEffects<G>,
+ ) {
+ if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
+ return;
+ }
+
+ let enum_ = discr.place().and_then(|discr| {
+ switch_on_enum_discriminant(self.tcx, &self.body, &self.body[block], discr)
+ });
+
+ let Some((enum_place, enum_def)) = enum_ else {
+ return;
+ };
+
+ let mut discriminants = enum_def.discriminants(self.tcx);
+ edge_effects.apply(|trans, edge| {
+ let Some(value) = edge.value else {
+ return;
+ };
+
+ // MIR building adds discriminants to the `values` array in the same order as they
+ // are yielded by `AdtDef::discriminants`. We rely on this to match each
+ // discriminant in `values` to its corresponding variant in linear time.
+ let (variant, _) = discriminants
+ .find(|&(_, discr)| discr.val == value)
+ .expect("Order of `AdtDef::discriminants` differed from `SwitchInt::values`");
+
+ // Kill all move paths that correspond to variants we know to be inactive along this
+ // particular outgoing edge of a `SwitchInt`.
+ drop_flag_effects::on_all_inactive_variants(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ enum_place,
+ variant,
+ |mpi| trans.kill(mpi),
+ );
+ });
+ }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
+ type Domain = ChunkedBitSet<MovePathIndex>;
+
+ const NAME: &'static str = "maybe_uninit";
+
+ fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
+        // bottom = initialized (`initialize_start_block` counteracts this at the outset)
+ ChunkedBitSet::new_empty(self.move_data().move_paths.len())
+ }
+
+    // Sets the entry-state bits for the argument places.
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ // set all bits to 1 (uninit) before gathering counter-evidence
+ state.insert_all();
+
+ drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
+ assert!(s == DropFlagState::Present);
+ state.remove(path);
+ });
+ }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
+ type Idx = MovePathIndex;
+
+ fn domain_size(&self, _: &Body<'tcx>) -> usize {
+ self.move_data().move_paths.len()
+ }
+
+ fn statement_effect(
+ &mut self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ });
+
+ // Unlike in `MaybeInitializedPlaces` above, we don't need to change the state when a
+ // mutable borrow occurs. Places cannot become uninitialized through a mutable reference.
+ }
+
+ fn terminator_effect<'mir>(
+ &mut self,
+ trans: &mut Self::Domain,
+ terminator: &'mir mir::Terminator<'tcx>,
+ location: Location,
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ });
+ if self.skip_unreachable_unwind.contains(location.block) {
+ let mir::TerminatorKind::Drop { target, unwind, .. } = terminator.kind else { bug!() };
+ assert!(matches!(unwind, mir::UnwindAction::Cleanup(_)));
+ TerminatorEdges::Single(target)
+ } else {
+ terminator.edges()
+ }
+ }
+
+ fn call_return_effect(
+ &mut self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _block: mir::BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+            // When a call returns successfully, the destination place is
+            // initialized, so clear its maybe-uninit bits (set them to 0).
+ on_lookup_result_bits(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ self.move_data().rev_lookup.find(place.as_ref()),
+ |mpi| {
+ trans.kill(mpi);
+ },
+ );
+ });
+ }
+
+ fn switch_int_edge_effects<G: GenKill<Self::Idx>>(
+ &mut self,
+ block: mir::BasicBlock,
+ discr: &mir::Operand<'tcx>,
+ edge_effects: &mut impl SwitchIntEdgeEffects<G>,
+ ) {
+ if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
+ return;
+ }
+
+ if !self.mark_inactive_variants_as_uninit {
+ return;
+ }
+
+ let enum_ = discr.place().and_then(|discr| {
+ switch_on_enum_discriminant(self.tcx, &self.body, &self.body[block], discr)
+ });
+
+ let Some((enum_place, enum_def)) = enum_ else {
+ return;
+ };
+
+ let mut discriminants = enum_def.discriminants(self.tcx);
+ edge_effects.apply(|trans, edge| {
+ let Some(value) = edge.value else {
+ return;
+ };
+
+ // MIR building adds discriminants to the `values` array in the same order as they
+ // are yielded by `AdtDef::discriminants`. We rely on this to match each
+ // discriminant in `values` to its corresponding variant in linear time.
+ let (variant, _) = discriminants
+ .find(|&(_, discr)| discr.val == value)
+ .expect("Order of `AdtDef::discriminants` differed from `SwitchInt::values`");
+
+ // Mark all move paths that correspond to variants other than this one as maybe
+ // uninitialized (in reality, they are *definitely* uninitialized).
+ drop_flag_effects::on_all_inactive_variants(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ enum_place,
+ variant,
+ |mpi| trans.gen(mpi),
+ );
+ });
+ }
+}
+
+impl<'a, 'tcx> AnalysisDomain<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> {
+ /// Use set intersection as the join operator.
+ type Domain = lattice::Dual<BitSet<MovePathIndex>>;
+
+ const NAME: &'static str = "definite_init";
+
+ fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
+        // bottom = initialized (`initialize_start_block` counteracts this at the outset)
+ lattice::Dual(BitSet::new_filled(self.move_data().move_paths.len()))
+ }
+
+    // Sets the entry-state bits for the argument places.
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ state.0.clear();
+
+ drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
+ assert!(s == DropFlagState::Present);
+ state.0.insert(path);
+ });
+ }
+}
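The `lattice::Dual` wrapper above is what turns the framework's default union-join into the intersection-join a must-analysis needs: `DefinitelyInitializedPlaces` may only keep facts that hold on every incoming edge. A self-contained sketch of that join on a toy bitvector (not the actual lattice types from this crate):

```rust
/// Intersects `b` into `a`, returning whether `a` changed.
fn must_join(a: &mut Vec<bool>, b: &[bool]) -> bool {
    let mut changed = false;
    for (x, &y) in a.iter_mut().zip(b) {
        let joined = *x && y; // keep only facts true on *both* incoming paths
        if joined != *x {
            *x = joined;
            changed = true;
        }
    }
    changed
}

fn main() {
    // Places are a, b, c; the then-branch proves {a, b}, the else-branch {a, c}.
    let mut then_branch = vec![true, true, false];
    let else_branch = [true, false, true];
    assert!(must_join(&mut then_branch, &else_branch));
    assert_eq!(then_branch, vec![true, false, false]); // only {a} is definite
}
```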
+
+impl<'tcx> GenKillAnalysis<'tcx> for DefinitelyInitializedPlaces<'_, 'tcx> {
+ type Idx = MovePathIndex;
+
+ fn domain_size(&self, _: &Body<'tcx>) -> usize {
+ self.move_data().move_paths.len()
+ }
+
+ fn statement_effect(
+ &mut self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ })
+ }
+
+ fn terminator_effect<'mir>(
+ &mut self,
+ trans: &mut Self::Domain,
+ terminator: &'mir mir::Terminator<'tcx>,
+ location: Location,
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ });
+ terminator.edges()
+ }
+
+ fn call_return_effect(
+ &mut self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _block: mir::BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+            // When a call returns successfully, the destination place is
+            // initialized, so set its bits to 1.
+ on_lookup_result_bits(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ self.move_data().rev_lookup.find(place.as_ref()),
+ |mpi| {
+ trans.gen(mpi);
+ },
+ );
+ });
+ }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for EverInitializedPlaces<'_, 'tcx> {
+ type Domain = ChunkedBitSet<InitIndex>;
+
+ const NAME: &'static str = "ever_init";
+
+ fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = no initialized variables by default
+ ChunkedBitSet::new_empty(self.move_data().inits.len())
+ }
+
+ fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ for arg_init in 0..body.arg_count {
+ state.insert(InitIndex::new(arg_init));
+ }
+ }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for EverInitializedPlaces<'_, 'tcx> {
+ type Idx = InitIndex;
+
+ fn domain_size(&self, _: &Body<'tcx>) -> usize {
+ self.move_data().inits.len()
+ }
+
+ #[instrument(skip(self, trans), level = "debug")]
+ fn statement_effect(
+ &mut self,
+ trans: &mut impl GenKill<Self::Idx>,
+ stmt: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ let move_data = self.move_data();
+ let init_path_map = &move_data.init_path_map;
+ let init_loc_map = &move_data.init_loc_map;
+ let rev_lookup = &move_data.rev_lookup;
+
+ debug!("initializes move_indexes {:?}", &init_loc_map[location]);
+ trans.gen_all(init_loc_map[location].iter().copied());
+
+ if let mir::StatementKind::StorageDead(local) = stmt.kind {
+ // End inits for StorageDead, so that an immutable variable can
+ // be reinitialized on the next iteration of the loop.
+ let move_path_index = rev_lookup.find_local(local);
+ debug!("clears the ever initialized status of {:?}", init_path_map[move_path_index]);
+ trans.kill_all(init_path_map[move_path_index].iter().copied());
+ }
+ }
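A hedged source-level example (not part of the patch) of the reinitialization pattern the `StorageDead` handling above exists for: the binding gets fresh storage on every loop iteration, so assigning the immutable `x` again is not a second assignment to already "ever initialized" storage.

```rust
fn main() {
    for i in 0..3 {
        let x; // StorageLive each iteration; StorageDead at the end of it
        x = i; // allowed: the previous iteration's init was ended by StorageDead
        assert_eq!(x, i);
    }
}
```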
+
+ #[instrument(skip(self, trans, terminator), level = "debug")]
+ fn terminator_effect<'mir>(
+ &mut self,
+ trans: &mut Self::Domain,
+ terminator: &'mir mir::Terminator<'tcx>,
+ location: Location,
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ let (body, move_data) = (self.body, self.move_data());
+ let term = body[location.block].terminator();
+ let init_loc_map = &move_data.init_loc_map;
+ debug!(?term);
+ debug!("initializes move_indexes {:?}", init_loc_map[location]);
+ trans.gen_all(
+ init_loc_map[location]
+ .iter()
+ .filter(|init_index| {
+ move_data.inits[**init_index].kind != InitKind::NonPanicPathOnly
+ })
+ .copied(),
+ );
+ terminator.edges()
+ }
+
+ fn call_return_effect(
+ &mut self,
+ trans: &mut impl GenKill<Self::Idx>,
+ block: mir::BasicBlock,
+ _return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ let move_data = self.move_data();
+ let init_loc_map = &move_data.init_loc_map;
+
+ let call_loc = self.body.terminator_loc(block);
+ for init_index in &init_loc_map[call_loc] {
+ trans.gen(*init_index);
+ }
+ }
+}
+
+/// Inspect a `SwitchInt`-terminated basic block to see if the condition of that `SwitchInt` is
+/// an enum discriminant.
+///
+/// We expect such blocks to have a call to `discriminant` as their last statement like so:
+///
+/// ```text
+/// ...
+/// _42 = discriminant(_1)
+/// SwitchInt(_42, ..)
+/// ```
+///
+/// If the basic block matches this pattern, this function returns the place corresponding to the
+/// enum (`_1` in the example above) as well as the `AdtDef` of that enum.
+fn switch_on_enum_discriminant<'mir, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+ block: &'mir mir::BasicBlockData<'tcx>,
+ switch_on: mir::Place<'tcx>,
+) -> Option<(mir::Place<'tcx>, ty::AdtDef<'tcx>)> {
+ for statement in block.statements.iter().rev() {
+ match &statement.kind {
+ mir::StatementKind::Assign(box (lhs, mir::Rvalue::Discriminant(discriminated)))
+ if *lhs == switch_on =>
+ {
+ match discriminated.ty(body, tcx).ty.kind() {
+ ty::Adt(def, _) => return Some((*discriminated, *def)),
+
+ // `Rvalue::Discriminant` is also used to get the active yield point for a
+ // generator, but we do not need edge-specific effects in that case. This may
+ // change in the future.
+ ty::Generator(..) => return None,
+
+ t => bug!("`discriminant` called on unexpected type {:?}", t),
+ }
+ }
+ mir::StatementKind::Coverage(_) => continue,
+ _ => return None,
+ }
+ }
+ None
+}
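For orientation, a hedged sketch (not part of the patch) of how callers typically drive one of these analyses through the crate's dataflow engine. The surrounding `tcx`/`body`/`mdpe` values are assumed to exist, and the cursor calls shown are the ones the framework exposes at this point; treat it as illustrative rather than normative.

```rust
use rustc_middle::mir::{self, Location};
use rustc_middle::ty::TyCtxt;

use crate::impls::MaybeInitializedPlaces;
use crate::{Analysis, MoveDataParamEnv};

fn maybe_init_on_entry_to<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &mir::Body<'tcx>,
    mdpe: &MoveDataParamEnv<'tcx>,
    loc: Location,
) {
    let mut cursor = MaybeInitializedPlaces::new(tcx, body, mdpe)
        .into_engine(tcx, body)
        .iterate_to_fixpoint()
        .into_results_cursor(body);

    cursor.seek_before_primary_effect(loc);
    // `cursor.get()` is now the set of move paths that may be initialized
    // on entry to `loc`.
    let _state = cursor.get();
}
```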
diff --git a/compiler/rustc_mir_dataflow/src/impls/liveness.rs b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
index 9662c1977..5aa73c7a9 100644
--- a/compiler/rustc_mir_dataflow/src/impls/liveness.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
@@ -1,8 +1,10 @@
use rustc_index::bit_set::{BitSet, ChunkedBitSet};
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
-use rustc_middle::mir::{self, Local, Location, Place, StatementKind};
+use rustc_middle::mir::{
+ self, CallReturnPlaces, Local, Location, Place, StatementKind, TerminatorEdges,
+};
-use crate::{Analysis, AnalysisDomain, Backward, CallReturnPlaces, GenKill, GenKillAnalysis};
+use crate::{Analysis, AnalysisDomain, Backward, GenKill, GenKillAnalysis};
/// A [live-variable dataflow analysis][liveness].
///
@@ -43,6 +45,10 @@ impl<'tcx> AnalysisDomain<'tcx> for MaybeLiveLocals {
impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals {
type Idx = Local;
+ fn domain_size(&self, body: &mir::Body<'tcx>) -> usize {
+ body.local_decls.len()
+ }
+
fn statement_effect(
&mut self,
trans: &mut impl GenKill<Self::Idx>,
@@ -52,13 +58,14 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals {
TransferFunction(trans).visit_statement(statement, location);
}
- fn terminator_effect(
+ fn terminator_effect<'mir>(
&mut self,
- trans: &mut impl GenKill<Self::Idx>,
- terminator: &mir::Terminator<'tcx>,
+ trans: &mut Self::Domain,
+ terminator: &'mir mir::Terminator<'tcx>,
location: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
TransferFunction(trans).visit_terminator(terminator, location);
+ terminator.edges()
}
fn call_return_effect(
@@ -67,28 +74,23 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals {
_block: mir::BasicBlock,
return_places: CallReturnPlaces<'_, 'tcx>,
) {
- return_places.for_each(|place| {
- if let Some(local) = place.as_local() {
- trans.kill(local);
- }
- });
- }
-
- fn yield_resume_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _resume_block: mir::BasicBlock,
- resume_place: mir::Place<'tcx>,
- ) {
- YieldResumeEffect(trans).visit_place(
- &resume_place,
- PlaceContext::MutatingUse(MutatingUseContext::Yield),
- Location::START,
- )
+ if let CallReturnPlaces::Yield(resume_place) = return_places {
+ YieldResumeEffect(trans).visit_place(
+ &resume_place,
+ PlaceContext::MutatingUse(MutatingUseContext::Yield),
+ Location::START,
+ )
+ } else {
+ return_places.for_each(|place| {
+ if let Some(local) = place.as_local() {
+ trans.kill(local);
+ }
+ });
+ }
}
}
-struct TransferFunction<'a, T>(&'a mut T);
+pub struct TransferFunction<'a, T>(pub &'a mut T);
impl<'tcx, T> Visitor<'tcx> for TransferFunction<'_, T>
where
@@ -97,7 +99,7 @@ where
fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
if let PlaceContext::MutatingUse(MutatingUseContext::Yield) = context {
// The resume place is evaluated and assigned to only after generator resumes, so its
- // effect is handled separately in `yield_resume_effect`.
+            // effect is handled separately in `call_return_effect`.
return;
}
@@ -283,13 +285,14 @@ impl<'a, 'tcx> Analysis<'tcx> for MaybeTransitiveLiveLocals<'a> {
TransferFunction(trans).visit_statement(statement, location);
}
- fn apply_terminator_effect(
+ fn apply_terminator_effect<'mir>(
&mut self,
trans: &mut Self::Domain,
- terminator: &mir::Terminator<'tcx>,
+ terminator: &'mir mir::Terminator<'tcx>,
location: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
TransferFunction(trans).visit_terminator(terminator, location);
+ terminator.edges()
}
fn apply_call_return_effect(
@@ -298,23 +301,18 @@ impl<'a, 'tcx> Analysis<'tcx> for MaybeTransitiveLiveLocals<'a> {
_block: mir::BasicBlock,
return_places: CallReturnPlaces<'_, 'tcx>,
) {
- return_places.for_each(|place| {
- if let Some(local) = place.as_local() {
- trans.remove(local);
- }
- });
- }
-
- fn apply_yield_resume_effect(
- &mut self,
- trans: &mut Self::Domain,
- _resume_block: mir::BasicBlock,
- resume_place: mir::Place<'tcx>,
- ) {
- YieldResumeEffect(trans).visit_place(
- &resume_place,
- PlaceContext::MutatingUse(MutatingUseContext::Yield),
- Location::START,
- )
+ if let CallReturnPlaces::Yield(resume_place) = return_places {
+ YieldResumeEffect(trans).visit_place(
+ &resume_place,
+ PlaceContext::MutatingUse(MutatingUseContext::Yield),
+ Location::START,
+ )
+ } else {
+ return_places.for_each(|place| {
+ if let Some(local) = place.as_local() {
+ trans.remove(local);
+ }
+ });
+ }
}
}
diff --git a/compiler/rustc_mir_dataflow/src/impls/mod.rs b/compiler/rustc_mir_dataflow/src/impls/mod.rs
index 98cec1c67..f8db18fc1 100644
--- a/compiler/rustc_mir_dataflow/src/impls/mod.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/mod.rs
@@ -2,763 +2,18 @@
//! bitvectors attached to each basic block, represented via a
//! zero-sized structure.
-use rustc_index::bit_set::{BitSet, ChunkedBitSet};
-use rustc_index::Idx;
-use rustc_middle::mir::visit::{MirVisitable, Visitor};
-use rustc_middle::mir::{self, Body, Location};
-use rustc_middle::ty::{self, TyCtxt};
-
-use crate::drop_flag_effects_for_function_entry;
-use crate::drop_flag_effects_for_location;
-use crate::elaborate_drops::DropFlagState;
-use crate::framework::{CallReturnPlaces, SwitchIntEdgeEffects};
-use crate::move_paths::{HasMoveData, InitIndex, InitKind, LookupResult, MoveData, MovePathIndex};
-use crate::on_lookup_result_bits;
-use crate::MoveDataParamEnv;
-use crate::{drop_flag_effects, on_all_children_bits};
-use crate::{lattice, AnalysisDomain, GenKill, GenKillAnalysis};
-
mod borrowed_locals;
+mod initialized;
mod liveness;
mod storage_liveness;
pub use self::borrowed_locals::borrowed_locals;
pub use self::borrowed_locals::MaybeBorrowedLocals;
+pub use self::initialized::{
+ DefinitelyInitializedPlaces, EverInitializedPlaces, MaybeInitializedPlaces,
+ MaybeUninitializedPlaces,
+};
pub use self::liveness::MaybeLiveLocals;
pub use self::liveness::MaybeTransitiveLiveLocals;
+pub use self::liveness::TransferFunction as LivenessTransferFunction;
pub use self::storage_liveness::{MaybeRequiresStorage, MaybeStorageDead, MaybeStorageLive};
-
-/// `MaybeInitializedPlaces` tracks all places that might be
-/// initialized upon reaching a particular point in the control flow
-/// for a function.
-///
-/// For example, in code like the following, we have corresponding
-/// dataflow information shown in the right-hand comments.
-///
-/// ```rust
-/// struct S;
-/// fn foo(pred: bool) { // maybe-init:
-/// // {}
-/// let a = S; let mut b = S; let c; let d; // {a, b}
-///
-/// if pred {
-/// drop(a); // { b}
-/// b = S; // { b}
-///
-/// } else {
-/// drop(b); // {a}
-/// d = S; // {a, d}
-///
-/// } // {a, b, d}
-///
-/// c = S; // {a, b, c, d}
-/// }
-/// ```
-///
-/// To determine whether a place *must* be initialized at a
-/// particular control-flow point, one can take the set-difference
-/// between this data and the data from `MaybeUninitializedPlaces` at the
-/// corresponding control-flow point.
-///
-/// Similarly, at a given `drop` statement, the set-intersection
-/// between this data and `MaybeUninitializedPlaces` yields the set of
-/// places that would require a dynamic drop-flag at that statement.
-pub struct MaybeInitializedPlaces<'a, 'tcx> {
- tcx: TyCtxt<'tcx>,
- body: &'a Body<'tcx>,
- mdpe: &'a MoveDataParamEnv<'tcx>,
-}
-
-impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
- pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
- MaybeInitializedPlaces { tcx, body, mdpe }
- }
-}
-
-impl<'a, 'tcx> HasMoveData<'tcx> for MaybeInitializedPlaces<'a, 'tcx> {
- fn move_data(&self) -> &MoveData<'tcx> {
- &self.mdpe.move_data
- }
-}
-
-/// `MaybeUninitializedPlaces` tracks all places that might be
-/// uninitialized upon reaching a particular point in the control flow
-/// for a function.
-///
-/// For example, in code like the following, we have corresponding
-/// dataflow information shown in the right-hand comments.
-///
-/// ```rust
-/// struct S;
-/// fn foo(pred: bool) { // maybe-uninit:
-/// // {a, b, c, d}
-/// let a = S; let mut b = S; let c; let d; // { c, d}
-///
-/// if pred {
-/// drop(a); // {a, c, d}
-/// b = S; // {a, c, d}
-///
-/// } else {
-/// drop(b); // { b, c, d}
-/// d = S; // { b, c }
-///
-/// } // {a, b, c, d}
-///
-/// c = S; // {a, b, d}
-/// }
-/// ```
-///
-/// To determine whether a place *must* be uninitialized at a
-/// particular control-flow point, one can take the set-difference
-/// between this data and the data from `MaybeInitializedPlaces` at the
-/// corresponding control-flow point.
-///
-/// Similarly, at a given `drop` statement, the set-intersection
-/// between this data and `MaybeInitializedPlaces` yields the set of
-/// places that would require a dynamic drop-flag at that statement.
-pub struct MaybeUninitializedPlaces<'a, 'tcx> {
- tcx: TyCtxt<'tcx>,
- body: &'a Body<'tcx>,
- mdpe: &'a MoveDataParamEnv<'tcx>,
-
- mark_inactive_variants_as_uninit: bool,
-}
-
-impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
- pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
- MaybeUninitializedPlaces { tcx, body, mdpe, mark_inactive_variants_as_uninit: false }
- }
-
- /// Causes inactive enum variants to be marked as "maybe uninitialized" after a switch on an
- /// enum discriminant.
- ///
- /// This is correct in a vacuum but is not the default because it causes problems in the borrow
- /// checker, where this information gets propagated along `FakeEdge`s.
- pub fn mark_inactive_variants_as_uninit(mut self) -> Self {
- self.mark_inactive_variants_as_uninit = true;
- self
- }
-}
-
-impl<'a, 'tcx> HasMoveData<'tcx> for MaybeUninitializedPlaces<'a, 'tcx> {
- fn move_data(&self) -> &MoveData<'tcx> {
- &self.mdpe.move_data
- }
-}
-
-/// `DefinitelyInitializedPlaces` tracks all places that are definitely
-/// initialized upon reaching a particular point in the control flow
-/// for a function.
-///
-/// For example, in code like the following, we have corresponding
-/// dataflow information shown in the right-hand comments.
-///
-/// ```rust
-/// struct S;
-/// fn foo(pred: bool) { // definite-init:
-/// // { }
-/// let a = S; let mut b = S; let c; let d; // {a, b }
-///
-/// if pred {
-/// drop(a); // { b, }
-/// b = S; // { b, }
-///
-/// } else {
-/// drop(b); // {a, }
-/// d = S; // {a, d}
-///
-/// } // { }
-///
-/// c = S; // { c }
-/// }
-/// ```
-///
-/// To determine whether a place *may* be uninitialized at a
-/// particular control-flow point, one can take the set-complement
-/// of this data.
-///
-/// Similarly, at a given `drop` statement, the set-difference between
-/// this data and `MaybeInitializedPlaces` yields the set of places
-/// that would require a dynamic drop-flag at that statement.
-pub struct DefinitelyInitializedPlaces<'a, 'tcx> {
- tcx: TyCtxt<'tcx>,
- body: &'a Body<'tcx>,
- mdpe: &'a MoveDataParamEnv<'tcx>,
-}
-
-impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
- pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
- DefinitelyInitializedPlaces { tcx, body, mdpe }
- }
-}
-
-impl<'a, 'tcx> HasMoveData<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> {
- fn move_data(&self) -> &MoveData<'tcx> {
- &self.mdpe.move_data
- }
-}
-
-/// `EverInitializedPlaces` tracks all places that might have ever been
-/// initialized upon reaching a particular point in the control flow
-/// for a function, without an intervening `StorageDead`.
-///
-/// This dataflow is used to determine if an immutable local variable may
-/// be assigned to.
-///
-/// For example, in code like the following, we have corresponding
-/// dataflow information shown in the right-hand comments.
-///
-/// ```rust
-/// struct S;
-/// fn foo(pred: bool) { // ever-init:
-/// // { }
-/// let a = S; let mut b = S; let c; let d; // {a, b }
-///
-/// if pred {
-/// drop(a); // {a, b, }
-/// b = S; // {a, b, }
-///
-/// } else {
-/// drop(b); // {a, b, }
-/// d = S; // {a, b, d }
-///
-/// } // {a, b, d }
-///
-/// c = S; // {a, b, c, d }
-/// }
-/// ```
-pub struct EverInitializedPlaces<'a, 'tcx> {
- #[allow(dead_code)]
- tcx: TyCtxt<'tcx>,
- body: &'a Body<'tcx>,
- mdpe: &'a MoveDataParamEnv<'tcx>,
-}
-
-impl<'a, 'tcx> EverInitializedPlaces<'a, 'tcx> {
- pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
- EverInitializedPlaces { tcx, body, mdpe }
- }
-}
-
-impl<'a, 'tcx> HasMoveData<'tcx> for EverInitializedPlaces<'a, 'tcx> {
- fn move_data(&self) -> &MoveData<'tcx> {
- &self.mdpe.move_data
- }
-}
-
-impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
- fn update_bits(
- trans: &mut impl GenKill<MovePathIndex>,
- path: MovePathIndex,
- state: DropFlagState,
- ) {
- match state {
- DropFlagState::Absent => trans.kill(path),
- DropFlagState::Present => trans.gen(path),
- }
- }
-}
-
-impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
- fn update_bits(
- trans: &mut impl GenKill<MovePathIndex>,
- path: MovePathIndex,
- state: DropFlagState,
- ) {
- match state {
- DropFlagState::Absent => trans.gen(path),
- DropFlagState::Present => trans.kill(path),
- }
- }
-}
-
-impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
- fn update_bits(
- trans: &mut impl GenKill<MovePathIndex>,
- path: MovePathIndex,
- state: DropFlagState,
- ) {
- match state {
- DropFlagState::Absent => trans.kill(path),
- DropFlagState::Present => trans.gen(path),
- }
- }
-}
-
-impl<'tcx> AnalysisDomain<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
- type Domain = ChunkedBitSet<MovePathIndex>;
- const NAME: &'static str = "maybe_init";
-
- fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
- // bottom = uninitialized
- ChunkedBitSet::new_empty(self.move_data().move_paths.len())
- }
-
- fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
- drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
- assert!(s == DropFlagState::Present);
- state.insert(path);
- });
- }
-}
-
-impl<'tcx> GenKillAnalysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
- type Idx = MovePathIndex;
-
- fn statement_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- statement: &mir::Statement<'tcx>,
- location: Location,
- ) {
- drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
- Self::update_bits(trans, path, s)
- });
-
- if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
- return;
- }
-
- // Mark all places as "maybe init" if they are mutably borrowed. See #90752.
- for_each_mut_borrow(statement, location, |place| {
- let LookupResult::Exact(mpi) = self.move_data().rev_lookup.find(place.as_ref()) else { return };
- on_all_children_bits(self.tcx, self.body, self.move_data(), mpi, |child| {
- trans.gen(child);
- })
- })
- }
-
- fn terminator_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- terminator: &mir::Terminator<'tcx>,
- location: Location,
- ) {
- drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
- Self::update_bits(trans, path, s)
- });
-
- if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
- return;
- }
-
- for_each_mut_borrow(terminator, location, |place| {
- let LookupResult::Exact(mpi) = self.move_data().rev_lookup.find(place.as_ref()) else { return };
- on_all_children_bits(self.tcx, self.body, self.move_data(), mpi, |child| {
- trans.gen(child);
- })
- })
- }
-
- fn call_return_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _block: mir::BasicBlock,
- return_places: CallReturnPlaces<'_, 'tcx>,
- ) {
- return_places.for_each(|place| {
- // when a call returns successfully, that means we need to set
- // the bits for that dest_place to 1 (initialized).
- on_lookup_result_bits(
- self.tcx,
- self.body,
- self.move_data(),
- self.move_data().rev_lookup.find(place.as_ref()),
- |mpi| {
- trans.gen(mpi);
- },
- );
- });
- }
-
- fn switch_int_edge_effects<G: GenKill<Self::Idx>>(
- &mut self,
- block: mir::BasicBlock,
- discr: &mir::Operand<'tcx>,
- edge_effects: &mut impl SwitchIntEdgeEffects<G>,
- ) {
- if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
- return;
- }
-
- let enum_ = discr.place().and_then(|discr| {
- switch_on_enum_discriminant(self.tcx, &self.body, &self.body[block], discr)
- });
-
- let Some((enum_place, enum_def)) = enum_ else {
- return;
- };
-
- let mut discriminants = enum_def.discriminants(self.tcx);
- edge_effects.apply(|trans, edge| {
- let Some(value) = edge.value else {
- return;
- };
-
- // MIR building adds discriminants to the `values` array in the same order as they
- // are yielded by `AdtDef::discriminants`. We rely on this to match each
- // discriminant in `values` to its corresponding variant in linear time.
- let (variant, _) = discriminants
- .find(|&(_, discr)| discr.val == value)
- .expect("Order of `AdtDef::discriminants` differed from `SwitchInt::values`");
-
- // Kill all move paths that correspond to variants we know to be inactive along this
- // particular outgoing edge of a `SwitchInt`.
- drop_flag_effects::on_all_inactive_variants(
- self.tcx,
- self.body,
- self.move_data(),
- enum_place,
- variant,
- |mpi| trans.kill(mpi),
- );
- });
- }
-}
-
-impl<'tcx> AnalysisDomain<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
- type Domain = ChunkedBitSet<MovePathIndex>;
-
- const NAME: &'static str = "maybe_uninit";
-
- fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
- // bottom = initialized (start_block_effect counters this at outset)
- ChunkedBitSet::new_empty(self.move_data().move_paths.len())
- }
-
- // sets on_entry bits for Arg places
- fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
- // set all bits to 1 (uninit) before gathering counter-evidence
- state.insert_all();
-
- drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
- assert!(s == DropFlagState::Present);
- state.remove(path);
- });
- }
-}
-
-impl<'tcx> GenKillAnalysis<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
- type Idx = MovePathIndex;
-
- fn statement_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _statement: &mir::Statement<'tcx>,
- location: Location,
- ) {
- drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
- Self::update_bits(trans, path, s)
- });
-
- // Unlike in `MaybeInitializedPlaces` above, we don't need to change the state when a
- // mutable borrow occurs. Places cannot become uninitialized through a mutable reference.
- }
-
- fn terminator_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _terminator: &mir::Terminator<'tcx>,
- location: Location,
- ) {
- drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
- Self::update_bits(trans, path, s)
- });
- }
-
- fn call_return_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _block: mir::BasicBlock,
- return_places: CallReturnPlaces<'_, 'tcx>,
- ) {
- return_places.for_each(|place| {
- // when a call returns successfully, that means we need to set
- // the bits for that dest_place to 0 (initialized).
- on_lookup_result_bits(
- self.tcx,
- self.body,
- self.move_data(),
- self.move_data().rev_lookup.find(place.as_ref()),
- |mpi| {
- trans.kill(mpi);
- },
- );
- });
- }
-
- fn switch_int_edge_effects<G: GenKill<Self::Idx>>(
- &mut self,
- block: mir::BasicBlock,
- discr: &mir::Operand<'tcx>,
- edge_effects: &mut impl SwitchIntEdgeEffects<G>,
- ) {
- if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
- return;
- }
-
- if !self.mark_inactive_variants_as_uninit {
- return;
- }
-
- let enum_ = discr.place().and_then(|discr| {
- switch_on_enum_discriminant(self.tcx, &self.body, &self.body[block], discr)
- });
-
- let Some((enum_place, enum_def)) = enum_ else {
- return;
- };
-
- let mut discriminants = enum_def.discriminants(self.tcx);
- edge_effects.apply(|trans, edge| {
- let Some(value) = edge.value else {
- return;
- };
-
- // MIR building adds discriminants to the `values` array in the same order as they
- // are yielded by `AdtDef::discriminants`. We rely on this to match each
- // discriminant in `values` to its corresponding variant in linear time.
- let (variant, _) = discriminants
- .find(|&(_, discr)| discr.val == value)
- .expect("Order of `AdtDef::discriminants` differed from `SwitchInt::values`");
-
- // Mark all move paths that correspond to variants other than this one as maybe
- // uninitialized (in reality, they are *definitely* uninitialized).
- drop_flag_effects::on_all_inactive_variants(
- self.tcx,
- self.body,
- self.move_data(),
- enum_place,
- variant,
- |mpi| trans.gen(mpi),
- );
- });
- }
-}
-
-impl<'a, 'tcx> AnalysisDomain<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> {
- /// Use set intersection as the join operator.
- type Domain = lattice::Dual<BitSet<MovePathIndex>>;
-
- const NAME: &'static str = "definite_init";
-
- fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
- // bottom = initialized (start_block_effect counters this at outset)
- lattice::Dual(BitSet::new_filled(self.move_data().move_paths.len()))
- }
-
- // sets on_entry bits for Arg places
- fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
- state.0.clear();
-
- drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
- assert!(s == DropFlagState::Present);
- state.0.insert(path);
- });
- }
-}
-
-impl<'tcx> GenKillAnalysis<'tcx> for DefinitelyInitializedPlaces<'_, 'tcx> {
- type Idx = MovePathIndex;
-
- fn statement_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _statement: &mir::Statement<'tcx>,
- location: Location,
- ) {
- drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
- Self::update_bits(trans, path, s)
- })
- }
-
- fn terminator_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _terminator: &mir::Terminator<'tcx>,
- location: Location,
- ) {
- drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
- Self::update_bits(trans, path, s)
- })
- }
-
- fn call_return_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _block: mir::BasicBlock,
- return_places: CallReturnPlaces<'_, 'tcx>,
- ) {
- return_places.for_each(|place| {
- // when a call returns successfully, that means we need to set
- // the bits for that dest_place to 1 (initialized).
- on_lookup_result_bits(
- self.tcx,
- self.body,
- self.move_data(),
- self.move_data().rev_lookup.find(place.as_ref()),
- |mpi| {
- trans.gen(mpi);
- },
- );
- });
- }
-}
-
-impl<'tcx> AnalysisDomain<'tcx> for EverInitializedPlaces<'_, 'tcx> {
- type Domain = ChunkedBitSet<InitIndex>;
-
- const NAME: &'static str = "ever_init";
-
- fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
- // bottom = no initialized variables by default
- ChunkedBitSet::new_empty(self.move_data().inits.len())
- }
-
- fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut Self::Domain) {
- for arg_init in 0..body.arg_count {
- state.insert(InitIndex::new(arg_init));
- }
- }
-}
-
-impl<'tcx> GenKillAnalysis<'tcx> for EverInitializedPlaces<'_, 'tcx> {
- type Idx = InitIndex;
-
- #[instrument(skip(self, trans), level = "debug")]
- fn statement_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- stmt: &mir::Statement<'tcx>,
- location: Location,
- ) {
- let move_data = self.move_data();
- let init_path_map = &move_data.init_path_map;
- let init_loc_map = &move_data.init_loc_map;
- let rev_lookup = &move_data.rev_lookup;
-
- debug!("initializes move_indexes {:?}", &init_loc_map[location]);
- trans.gen_all(init_loc_map[location].iter().copied());
-
- if let mir::StatementKind::StorageDead(local) = stmt.kind {
- // End inits for StorageDead, so that an immutable variable can
- // be reinitialized on the next iteration of the loop.
- let move_path_index = rev_lookup.find_local(local);
- debug!("clears the ever initialized status of {:?}", init_path_map[move_path_index]);
- trans.kill_all(init_path_map[move_path_index].iter().copied());
- }
- }
-
- #[instrument(skip(self, trans, _terminator), level = "debug")]
- fn terminator_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _terminator: &mir::Terminator<'tcx>,
- location: Location,
- ) {
- let (body, move_data) = (self.body, self.move_data());
- let term = body[location.block].terminator();
- let init_loc_map = &move_data.init_loc_map;
- debug!(?term);
- debug!("initializes move_indexes {:?}", init_loc_map[location]);
- trans.gen_all(
- init_loc_map[location]
- .iter()
- .filter(|init_index| {
- move_data.inits[**init_index].kind != InitKind::NonPanicPathOnly
- })
- .copied(),
- );
- }
-
- fn call_return_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- block: mir::BasicBlock,
- _return_places: CallReturnPlaces<'_, 'tcx>,
- ) {
- let move_data = self.move_data();
- let init_loc_map = &move_data.init_loc_map;
-
- let call_loc = self.body.terminator_loc(block);
- for init_index in &init_loc_map[call_loc] {
- trans.gen(*init_index);
- }
- }
-}
-
-/// Inspect a `SwitchInt`-terminated basic block to see if the condition of that `SwitchInt` is
-/// an enum discriminant.
-///
-/// We expect such blocks to have a call to `discriminant` as their last statement like so:
-///
-/// ```text
-/// ...
-/// _42 = discriminant(_1)
-/// SwitchInt(_42, ..)
-/// ```
-///
-/// If the basic block matches this pattern, this function returns the place corresponding to the
-/// enum (`_1` in the example above) as well as the `AdtDef` of that enum.
-fn switch_on_enum_discriminant<'mir, 'tcx>(
- tcx: TyCtxt<'tcx>,
- body: &'mir mir::Body<'tcx>,
- block: &'mir mir::BasicBlockData<'tcx>,
- switch_on: mir::Place<'tcx>,
-) -> Option<(mir::Place<'tcx>, ty::AdtDef<'tcx>)> {
- for statement in block.statements.iter().rev() {
- match &statement.kind {
- mir::StatementKind::Assign(box (lhs, mir::Rvalue::Discriminant(discriminated)))
- if *lhs == switch_on =>
- {
- match discriminated.ty(body, tcx).ty.kind() {
- ty::Adt(def, _) => return Some((*discriminated, *def)),
-
- // `Rvalue::Discriminant` is also used to get the active yield point for a
- // generator, but we do not need edge-specific effects in that case. This may
- // change in the future.
- ty::Generator(..) => return None,
-
- t => bug!("`discriminant` called on unexpected type {:?}", t),
- }
- }
- mir::StatementKind::Coverage(_) => continue,
- _ => return None,
- }
- }
- None
-}
-
-struct OnMutBorrow<F>(F);
-
-impl<F> Visitor<'_> for OnMutBorrow<F>
-where
- F: FnMut(&mir::Place<'_>),
-{
- fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'_>, location: Location) {
- // FIXME: Does `&raw const foo` allow mutation? See #90413.
- match rvalue {
- mir::Rvalue::Ref(_, mir::BorrowKind::Mut { .. }, place)
- | mir::Rvalue::AddressOf(_, place) => (self.0)(place),
-
- _ => {}
- }
-
- self.super_rvalue(rvalue, location)
- }
-}
-
-/// Calls `f` for each mutable borrow or raw reference in the program.
-///
-/// This DOES NOT call `f` for a shared borrow of a type with interior mutability. That's okay for
-/// initializedness, because we cannot move from an `UnsafeCell` (outside of `core::cell`), but
-/// other analyses will likely need to check for `!Freeze`.
-fn for_each_mut_borrow<'tcx>(
- mir: &impl MirVisitable<'tcx>,
- location: Location,
- f: impl FnMut(&mir::Place<'_>),
-) {
- let mut vis = OnMutBorrow(f);
-
- mir.apply(location, &mut vis);
-}
diff --git a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
index 666c8d50a..bea23b7f7 100644
--- a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
@@ -1,10 +1,12 @@
-pub use super::*;
-
-use crate::{CallReturnPlaces, GenKill, ResultsClonedCursor};
+use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
+
use std::borrow::Cow;
+use super::MaybeBorrowedLocals;
+use crate::{GenKill, ResultsClonedCursor};
+
#[derive(Clone)]
pub struct MaybeStorageLive<'a> {
always_live_locals: Cow<'a, BitSet<Local>>,
@@ -27,12 +29,12 @@ impl<'tcx, 'a> crate::AnalysisDomain<'tcx> for MaybeStorageLive<'a> {
const NAME: &'static str = "maybe_storage_live";
- fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ fn bottom_value(&self, body: &Body<'tcx>) -> Self::Domain {
// bottom = dead
BitSet::new_empty(body.local_decls.len())
}
- fn initialize_start_block(&self, body: &mir::Body<'tcx>, on_entry: &mut Self::Domain) {
+ fn initialize_start_block(&self, body: &Body<'tcx>, on_entry: &mut Self::Domain) {
assert_eq!(body.local_decls.len(), self.always_live_locals.domain_size());
for local in self.always_live_locals.iter() {
on_entry.insert(local);
@@ -47,10 +49,14 @@ impl<'tcx, 'a> crate::AnalysisDomain<'tcx> for MaybeStorageLive<'a> {
impl<'tcx, 'a> crate::GenKillAnalysis<'tcx> for MaybeStorageLive<'a> {
type Idx = Local;
+ fn domain_size(&self, body: &Body<'tcx>) -> usize {
+ body.local_decls.len()
+ }
+
fn statement_effect(
&mut self,
trans: &mut impl GenKill<Self::Idx>,
- stmt: &mir::Statement<'tcx>,
+ stmt: &Statement<'tcx>,
_: Location,
) {
match stmt.kind {
@@ -60,13 +66,14 @@ impl<'tcx, 'a> crate::GenKillAnalysis<'tcx> for MaybeStorageLive<'a> {
}
}
- fn terminator_effect(
+ fn terminator_effect<'mir>(
&mut self,
- _trans: &mut impl GenKill<Self::Idx>,
- _: &mir::Terminator<'tcx>,
+ _trans: &mut Self::Domain,
+ terminator: &'mir Terminator<'tcx>,
_: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
// Terminators have no effect
+ terminator.edges()
}
fn call_return_effect(
@@ -95,12 +102,12 @@ impl<'tcx> crate::AnalysisDomain<'tcx> for MaybeStorageDead {
const NAME: &'static str = "maybe_storage_dead";
- fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ fn bottom_value(&self, body: &Body<'tcx>) -> Self::Domain {
// bottom = live
BitSet::new_empty(body.local_decls.len())
}
- fn initialize_start_block(&self, body: &mir::Body<'tcx>, on_entry: &mut Self::Domain) {
+ fn initialize_start_block(&self, body: &Body<'tcx>, on_entry: &mut Self::Domain) {
assert_eq!(body.local_decls.len(), self.always_live_locals.domain_size());
// Do not iterate on return place and args, as they are trivially always live.
for local in body.vars_and_temps_iter() {
@@ -114,10 +121,14 @@ impl<'tcx> crate::AnalysisDomain<'tcx> for MaybeStorageDead {
impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeStorageDead {
type Idx = Local;
+ fn domain_size(&self, body: &Body<'tcx>) -> usize {
+ body.local_decls.len()
+ }
+
fn statement_effect(
&mut self,
trans: &mut impl GenKill<Self::Idx>,
- stmt: &mir::Statement<'tcx>,
+ stmt: &Statement<'tcx>,
_: Location,
) {
match stmt.kind {
@@ -127,13 +138,14 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeStorageDead {
}
}
- fn terminator_effect(
+ fn terminator_effect<'mir>(
&mut self,
- _trans: &mut impl GenKill<Self::Idx>,
- _: &mir::Terminator<'tcx>,
+ _: &mut Self::Domain,
+ terminator: &'mir Terminator<'tcx>,
_: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
// Terminators have no effect
+ terminator.edges()
}
fn call_return_effect(
@@ -172,12 +184,12 @@ impl<'tcx> crate::AnalysisDomain<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
const NAME: &'static str = "requires_storage";
- fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ fn bottom_value(&self, body: &Body<'tcx>) -> Self::Domain {
// bottom = dead
BitSet::new_empty(body.local_decls.len())
}
- fn initialize_start_block(&self, body: &mir::Body<'tcx>, on_entry: &mut Self::Domain) {
+ fn initialize_start_block(&self, body: &Body<'tcx>, on_entry: &mut Self::Domain) {
// The resume argument is live on function entry (we don't care about
// the `self` argument)
for arg in body.args_iter().skip(1) {
@@ -189,10 +201,14 @@ impl<'tcx> crate::AnalysisDomain<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
type Idx = Local;
+ fn domain_size(&self, body: &Body<'tcx>) -> usize {
+ body.local_decls.len()
+ }
+
fn before_statement_effect(
&mut self,
trans: &mut impl GenKill<Self::Idx>,
- stmt: &mir::Statement<'tcx>,
+ stmt: &Statement<'tcx>,
loc: Location,
) {
// If a place is borrowed in a statement, it needs storage for that statement.
@@ -225,7 +241,7 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
fn statement_effect(
&mut self,
trans: &mut impl GenKill<Self::Idx>,
- _: &mir::Statement<'tcx>,
+ _: &Statement<'tcx>,
loc: Location,
) {
// If we move from a place then it only stops needing storage *after*
@@ -236,11 +252,14 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
fn before_terminator_effect(
&mut self,
trans: &mut impl GenKill<Self::Idx>,
- terminator: &mir::Terminator<'tcx>,
+ terminator: &Terminator<'tcx>,
loc: Location,
) {
// If a place is borrowed in a terminator, it needs storage for that terminator.
- self.borrowed_locals.mut_analysis().terminator_effect(trans, terminator, loc);
+ self.borrowed_locals
+ .mut_analysis()
+ .transfer_function(trans)
+ .visit_terminator(terminator, loc);
match &terminator.kind {
TerminatorKind::Call { destination, .. } => {
@@ -286,12 +305,12 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
}
}
- fn terminator_effect(
+ fn terminator_effect<'t>(
&mut self,
- trans: &mut impl GenKill<Self::Idx>,
- terminator: &mir::Terminator<'tcx>,
+ trans: &mut Self::Domain,
+ terminator: &'t Terminator<'tcx>,
loc: Location,
- ) {
+ ) -> TerminatorEdges<'t, 'tcx> {
match terminator.kind {
// For call terminators the destination requires storage for the call
// and after the call returns successfully, but not after a panic.
@@ -323,6 +342,7 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
}
self.check_for_move(trans, loc);
+ terminator.edges()
}
fn call_return_effect(
@@ -333,15 +353,6 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
) {
return_places.for_each(|place| trans.gen(place.local));
}
-
- fn yield_resume_effect(
- &mut self,
- trans: &mut impl GenKill<Self::Idx>,
- _resume_block: BasicBlock,
- resume_place: mir::Place<'tcx>,
- ) {
- trans.gen(resume_place.local);
- }
}
impl<'tcx> MaybeRequiresStorage<'_, '_, 'tcx> {
diff --git a/compiler/rustc_mir_dataflow/src/lib.rs b/compiler/rustc_mir_dataflow/src/lib.rs
index d43446bc5..0cdbee19d 100644
--- a/compiler/rustc_mir_dataflow/src/lib.rs
+++ b/compiler/rustc_mir_dataflow/src/lib.rs
@@ -28,8 +28,8 @@ pub use self::drop_flag_effects::{
};
pub use self::framework::{
fmt, graphviz, lattice, visit_results, Analysis, AnalysisDomain, AnalysisResults, Backward,
- CallReturnPlaces, CloneAnalysis, Direction, Engine, Forward, GenKill, GenKillAnalysis,
- JoinSemiLattice, Results, ResultsCloned, ResultsClonedCursor, ResultsCursor, ResultsRefCursor,
+ CloneAnalysis, Direction, Engine, Forward, GenKill, GenKillAnalysis, JoinSemiLattice,
+ MaybeReachable, Results, ResultsCloned, ResultsClonedCursor, ResultsCursor, ResultsRefCursor,
ResultsVisitable, ResultsVisitor, SwitchIntEdgeEffects,
};
@@ -43,6 +43,7 @@ pub mod impls;
pub mod move_paths;
pub mod rustc_peek;
pub mod storage;
+pub mod un_derefer;
pub mod value_analysis;
fluent_messages! { "../messages.ftl" }
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
index dc7e9ab3c..5052de991 100644
--- a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
+++ b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
@@ -4,7 +4,6 @@ use rustc_middle::mir::*;
use rustc_middle::ty::{self, TyCtxt};
use smallvec::{smallvec, SmallVec};
-use std::iter;
use std::mem;
use super::abs_domain::Lift;
@@ -40,22 +39,22 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
locals: body
.local_decls
.iter_enumerated()
- .filter(|(_, l)| !l.is_deref_temp())
- .map(|(i, _)| {
- (
- i,
+ .map(|(i, l)| {
+ if l.is_deref_temp() {
+ MovePathIndex::MAX
+ } else {
Self::new_move_path(
&mut move_paths,
&mut path_map,
&mut init_path_map,
None,
Place::from(i),
- ),
- )
+ )
+ }
})
.collect(),
projections: Default::default(),
- derefer_sidetable: Default::default(),
+ un_derefer: Default::default(),
},
move_paths,
path_map,
@@ -100,11 +99,10 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
///
/// Maybe we should have separate "borrowck" and "moveck" modes.
fn move_path_for(&mut self, place: Place<'tcx>) -> Result<MovePathIndex, MoveError<'tcx>> {
- let deref_chain = self.builder.data.rev_lookup.deref_chain(place.as_ref());
+ let data = &mut self.builder.data;
debug!("lookup({:?})", place);
- let mut base =
- self.builder.data.rev_lookup.find_local(deref_chain.first().unwrap_or(&place).local);
+ let mut base = data.rev_lookup.find_local(place.local);
// The move path index of the first union that we find. Once this is
// some we stop creating child move paths, since moves from unions
@@ -113,55 +111,60 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
// from `*(u.f: &_)` isn't allowed.
let mut union_path = None;
- for place in deref_chain.into_iter().chain(iter::once(place)) {
- for (place_ref, elem) in place.as_ref().iter_projections() {
- let body = self.builder.body;
- let tcx = self.builder.tcx;
- let place_ty = place_ref.ty(body, tcx).ty;
- match place_ty.kind() {
- ty::Ref(..) | ty::RawPtr(..) => {
- return Err(MoveError::cannot_move_out_of(
- self.loc,
- BorrowedContent {
- target_place: place_ref.project_deeper(&[elem], tcx),
- },
- ));
- }
- ty::Adt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() => {
- return Err(MoveError::cannot_move_out_of(
- self.loc,
- InteriorOfTypeWithDestructor { container_ty: place_ty },
- ));
- }
- ty::Adt(adt, _) if adt.is_union() => {
- union_path.get_or_insert(base);
- }
- ty::Slice(_) => {
+ for (place_ref, elem) in data.rev_lookup.un_derefer.iter_projections(place.as_ref()) {
+ let body = self.builder.body;
+ let tcx = self.builder.tcx;
+ let place_ty = place_ref.ty(body, tcx).ty;
+ match place_ty.kind() {
+ ty::Ref(..) | ty::RawPtr(..) => {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ BorrowedContent { target_place: place_ref.project_deeper(&[elem], tcx) },
+ ));
+ }
+ ty::Adt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() => {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ InteriorOfTypeWithDestructor { container_ty: place_ty },
+ ));
+ }
+ ty::Adt(adt, _) if adt.is_union() => {
+ union_path.get_or_insert(base);
+ }
+ ty::Slice(_) => {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ InteriorOfSliceOrArray {
+ ty: place_ty,
+ is_index: matches!(elem, ProjectionElem::Index(..)),
+ },
+ ));
+ }
+
+ ty::Array(..) => {
+ if let ProjectionElem::Index(..) = elem {
return Err(MoveError::cannot_move_out_of(
self.loc,
- InteriorOfSliceOrArray {
- ty: place_ty,
- is_index: matches!(elem, ProjectionElem::Index(..)),
- },
+ InteriorOfSliceOrArray { ty: place_ty, is_index: true },
));
}
+ }
- ty::Array(..) => {
- if let ProjectionElem::Index(..) = elem {
- return Err(MoveError::cannot_move_out_of(
- self.loc,
- InteriorOfSliceOrArray { ty: place_ty, is_index: true },
- ));
- }
- }
-
- _ => {}
- };
+ _ => {}
+ };
- if union_path.is_none() {
- base = self
- .add_move_path(base, elem, |tcx| place_ref.project_deeper(&[elem], tcx));
- }
+ if union_path.is_none() {
+ // inlined from add_move_path because of a borrowck conflict with the iterator
+ base =
+ *data.rev_lookup.projections.entry((base, elem.lift())).or_insert_with(|| {
+ MoveDataBuilder::new_move_path(
+ &mut data.move_paths,
+ &mut data.path_map,
+ &mut data.init_path_map,
+ Some(base),
+ place_ref.project_deeper(&[elem], tcx),
+ )
+ })
}
}
@@ -282,10 +285,14 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
fn gather_statement(&mut self, stmt: &Statement<'tcx>) {
match &stmt.kind {
StatementKind::Assign(box (place, Rvalue::CopyForDeref(reffed))) => {
- assert!(place.projection.is_empty());
- if self.builder.body.local_decls[place.local].is_deref_temp() {
- self.builder.data.rev_lookup.derefer_sidetable.insert(place.local, *reffed);
- }
+ let local = place.as_local().unwrap();
+ assert!(self.builder.body.local_decls[local].is_deref_temp());
+
+ let rev_lookup = &mut self.builder.data.rev_lookup;
+
+ rev_lookup.un_derefer.insert(local, reffed.as_ref());
+ let base_local = rev_lookup.un_derefer.deref_chain(local).first().unwrap().local;
+ rev_lookup.locals[local] = rev_lookup.locals[base_local];
}
StatementKind::Assign(box (place, rval)) => {
self.create_move_path(*place);
@@ -306,7 +313,7 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
StatementKind::StorageLive(_) => {}
StatementKind::StorageDead(local) => {
// DerefTemp locals (results of CopyForDeref) don't actually move anything.
- if !self.builder.data.rev_lookup.derefer_sidetable.contains_key(&local) {
+ if !self.builder.body.local_decls[*local].is_deref_temp() {
self.gather_move(Place::from(*local));
}
}
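A hedged source-level illustration (not part of the patch) of two of the `MoveError` cases built in `move_path_for` above; the commented lines are the ones the move checker rejects, with their usual surface diagnostics.

```rust
fn illustrate(r: &String, arr: [String; 2]) {
    // let a = *r;     // E0507: cannot move out of `*r` (BorrowedContent)
    // let b = arr[0]; // E0508: cannot move out of a non-copy array (InteriorOfSliceOrArray)
    let _ = (r, arr);
}

fn main() {
    illustrate(&String::from("s"), [String::new(), String::new()]);
}
```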
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/mod.rs b/compiler/rustc_mir_dataflow/src/move_paths/mod.rs
index aa901f66d..0c7aa6676 100644
--- a/compiler/rustc_mir_dataflow/src/move_paths/mod.rs
+++ b/compiler/rustc_mir_dataflow/src/move_paths/mod.rs
@@ -1,5 +1,6 @@
use crate::move_paths::builder::MoveDat;
-use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use crate::un_derefer::UnDerefer;
+use rustc_data_structures::fx::FxHashMap;
use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::mir::*;
use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
@@ -290,7 +291,7 @@ impl Init {
/// Tables mapping from a place to its MovePathIndex.
#[derive(Debug)]
pub struct MovePathLookup<'tcx> {
- locals: FxIndexMap<Local, MovePathIndex>,
+ locals: IndexVec<Local, MovePathIndex>,
/// projections are made from a base-place and a projection
/// elem. The base-place will have a unique MovePathIndex; we use
@@ -300,8 +301,7 @@ pub struct MovePathLookup<'tcx> {
/// elem to the associated MovePathIndex.
projections: FxHashMap<(MovePathIndex, AbstractElem), MovePathIndex>,
- /// Maps `DerefTemp` locals to the `Place`s assigned to them.
- derefer_sidetable: FxHashMap<Local, Place<'tcx>>,
+ un_derefer: UnDerefer<'tcx>,
}
mod builder;
@@ -317,54 +317,23 @@ impl<'tcx> MovePathLookup<'tcx> {
// alternative will *not* create a MovePath on the fly for an
// unknown place, but will rather return the nearest available
// parent.
- pub fn find(&self, place: PlaceRef<'_>) -> LookupResult {
- let deref_chain = self.deref_chain(place);
+ pub fn find(&self, place: PlaceRef<'tcx>) -> LookupResult {
+ let mut result = self.find_local(place.local);
- let local = match deref_chain.first() {
- Some(place) => place.local,
- None => place.local,
- };
-
- let mut result = *self.locals.get(&local).unwrap_or_else(|| {
- bug!("base local ({local:?}) of deref_chain should not be a deref temp")
- });
-
- // this needs to be a closure because `place` has a different lifetime than `prefix`'s places
- let mut subpaths_for_place = |place: PlaceRef<'_>| {
- for elem in place.projection.iter() {
- if let Some(&subpath) = self.projections.get(&(result, elem.lift())) {
- result = subpath;
- } else {
- return Some(result);
- }
- }
- None
- };
-
- for place in deref_chain {
- if let Some(result) = subpaths_for_place(place.as_ref()) {
+ for (_, elem) in self.un_derefer.iter_projections(place) {
+ if let Some(&subpath) = self.projections.get(&(result, elem.lift())) {
+ result = subpath;
+ } else {
return LookupResult::Parent(Some(result));
}
}
- if let Some(result) = subpaths_for_place(place) {
- return LookupResult::Parent(Some(result));
- }
-
LookupResult::Exact(result)
}
+ #[inline]
pub fn find_local(&self, local: Local) -> MovePathIndex {
- let deref_chain = self.deref_chain(Place::from(local).as_ref());
-
- let local = match deref_chain.last() {
- Some(place) => place.local,
- None => local,
- };
-
- *self.locals.get(&local).unwrap_or_else(|| {
- bug!("base local ({local:?}) of deref_chain should not be a deref temp")
- })
+ self.locals[local]
}
/// An enumerated iterator of `local`s and their associated
@@ -372,22 +341,7 @@ impl<'tcx> MovePathLookup<'tcx> {
pub fn iter_locals_enumerated(
&self,
) -> impl DoubleEndedIterator<Item = (Local, MovePathIndex)> + ExactSizeIterator + '_ {
- self.locals.iter().map(|(&l, &idx)| (l, idx))
- }
-
- /// Returns the chain of places behind `DerefTemp` locals in `place`
- pub fn deref_chain(&self, place: PlaceRef<'_>) -> Vec<Place<'tcx>> {
- let mut prefix = Vec::new();
- let mut local = place.local;
-
- while let Some(&reffed) = self.derefer_sidetable.get(&local) {
- prefix.insert(0, reffed);
- local = reffed.local;
- }
-
- debug!("deref_chain({place:?}) = {prefix:?}");
-
- prefix
+ self.locals.iter_enumerated().map(|(l, &idx)| (l, idx))
}
}
diff --git a/compiler/rustc_mir_dataflow/src/rustc_peek.rs b/compiler/rustc_mir_dataflow/src/rustc_peek.rs
index 156231c3a..775c522b4 100644
--- a/compiler/rustc_mir_dataflow/src/rustc_peek.rs
+++ b/compiler/rustc_mir_dataflow/src/rustc_peek.rs
@@ -190,14 +190,14 @@ impl PeekCall {
if let mir::TerminatorKind::Call { func: Operand::Constant(func), args, .. } =
&terminator.kind
{
- if let ty::FnDef(def_id, substs) = *func.literal.ty().kind() {
+ if let ty::FnDef(def_id, fn_args) = *func.literal.ty().kind() {
let name = tcx.item_name(def_id);
if !tcx.is_intrinsic(def_id) || name != sym::rustc_peek {
return None;
}
- assert_eq!(args.len(), 1);
- let kind = PeekCallKind::from_arg_ty(substs.type_at(0));
+ assert_eq!(fn_args.len(), 1);
+ let kind = PeekCallKind::from_arg_ty(fn_args.type_at(0));
let arg = match &args[0] {
Operand::Copy(place) | Operand::Move(place) => {
if let Some(local) = place.as_local() {
diff --git a/compiler/rustc_mir_dataflow/src/un_derefer.rs b/compiler/rustc_mir_dataflow/src/un_derefer.rs
new file mode 100644
index 000000000..874d50ffd
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/un_derefer.rs
@@ -0,0 +1,100 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::mir::*;
+
+/// Used for reverting changes made by `DerefSeparator`
+#[derive(Default, Debug)]
+pub struct UnDerefer<'tcx> {
+ deref_chains: FxHashMap<Local, Vec<PlaceRef<'tcx>>>,
+}
+
+impl<'tcx> UnDerefer<'tcx> {
+ #[inline]
+ pub fn insert(&mut self, local: Local, reffed: PlaceRef<'tcx>) {
+ let mut chain = self.deref_chains.remove(&reffed.local).unwrap_or_default();
+ chain.push(reffed);
+ self.deref_chains.insert(local, chain);
+ }
+
+ /// Returns the chain of places behind `DerefTemp` locals
+ #[inline]
+ pub fn deref_chain(&self, local: Local) -> &[PlaceRef<'tcx>] {
+ self.deref_chains.get(&local).map(Vec::as_slice).unwrap_or_default()
+ }
+
+ /// Iterates over the projections of a place and its deref chain.
+ ///
+ /// See [`PlaceRef::iter_projections`]
+ #[inline]
+ pub fn iter_projections(
+ &self,
+ place: PlaceRef<'tcx>,
+ ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + '_ {
+ ProjectionIter::new(self.deref_chain(place.local), place)
+ }
+}
+
+/// The iterator returned by [`UnDerefer::iter_projections`].
+struct ProjectionIter<'a, 'tcx> {
+ places: SlicePlusOne<'a, PlaceRef<'tcx>>,
+ proj_idx: usize,
+}
+
+impl<'a, 'tcx> ProjectionIter<'a, 'tcx> {
+ #[inline]
+ fn new(deref_chain: &'a [PlaceRef<'tcx>], place: PlaceRef<'tcx>) -> Self {
+ // just return an empty iterator for a bare local
+ let last = if place.as_local().is_none() {
+ Some(place)
+ } else {
+ debug_assert!(deref_chain.is_empty());
+ None
+ };
+
+ ProjectionIter { places: SlicePlusOne { slice: deref_chain, last }, proj_idx: 0 }
+ }
+}
+
+impl<'tcx> Iterator for ProjectionIter<'_, 'tcx> {
+ type Item = (PlaceRef<'tcx>, PlaceElem<'tcx>);
+
+ #[inline]
+ fn next(&mut self) -> Option<(PlaceRef<'tcx>, PlaceElem<'tcx>)> {
+ let place = self.places.read()?;
+
+ // the projection is never empty here; the bare-local case is handled in `new`
+ let partial_place =
+ PlaceRef { local: place.local, projection: &place.projection[..self.proj_idx] };
+ let elem = place.projection[self.proj_idx];
+
+ if self.proj_idx == place.projection.len() - 1 {
+ self.proj_idx = 0;
+ self.places.advance();
+ } else {
+ self.proj_idx += 1;
+ }
+
+ Some((partial_place, elem))
+ }
+}
+
+struct SlicePlusOne<'a, T> {
+ slice: &'a [T],
+ last: Option<T>,
+}
+
+impl<T: Copy> SlicePlusOne<'_, T> {
+ #[inline]
+ fn read(&self) -> Option<T> {
+ self.slice.first().copied().or(self.last)
+ }
+
+ #[inline]
+ fn advance(&mut self) {
+ match self.slice {
+ [_, ref remainder @ ..] => {
+ self.slice = remainder;
+ }
+ [] => self.last = None,
+ }
+ }
+}
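
The new `un_derefer.rs` drives `ProjectionIter` with `SlicePlusOne`: iterate over a borrowed slice of places (the deref chain) followed by one trailing place, without allocating. The real type exposes `read`/`advance` instead of implementing `Iterator`, because `ProjectionIter` re-reads the current place once per projection elem; the standalone sketch below collapses that into a plain iterator just to show the flattening behaviour:

// Illustrative sketch only; the compiler's version is the one above in un_derefer.rs.
struct SlicePlusOne<'a, T> {
    slice: &'a [T],
    last: Option<T>,
}

impl<T: Copy> Iterator for SlicePlusOne<'_, T> {
    type Item = T;

    fn next(&mut self) -> Option<T> {
        match self.slice.split_first() {
            Some((&head, rest)) => {
                // Step past the first element of the borrowed slice.
                self.slice = rest;
                Some(head)
            }
            // Once the slice is exhausted, yield the trailing element exactly once.
            None => self.last.take(),
        }
    }
}

fn main() {
    // Think of `chain` as the deref chain and `4` as the place itself.
    let chain: &[i32] = &[1, 2, 3];
    let iter = SlicePlusOne { slice: chain, last: Some(4) };
    assert_eq!(iter.collect::<Vec<_>>(), vec![1, 2, 3, 4]);
}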
diff --git a/compiler/rustc_mir_dataflow/src/value_analysis.rs b/compiler/rustc_mir_dataflow/src/value_analysis.rs
index 5693e5a4a..766e0257e 100644
--- a/compiler/rustc_mir_dataflow/src/value_analysis.rs
+++ b/compiler/rustc_mir_dataflow/src/value_analysis.rs
@@ -47,8 +47,7 @@ use rustc_target::abi::{FieldIdx, VariantIdx};
use crate::lattice::{HasBottom, HasTop};
use crate::{
- fmt::DebugWithContext, Analysis, AnalysisDomain, CallReturnPlaces, JoinSemiLattice,
- SwitchIntEdgeEffects,
+ fmt::DebugWithContext, Analysis, AnalysisDomain, JoinSemiLattice, SwitchIntEdgeEffects,
};
pub trait ValueAnalysis<'tcx> {
@@ -242,11 +241,19 @@ pub trait ValueAnalysis<'tcx> {
/// The effect of a successful function call return should not be
/// applied here, see [`Analysis::apply_terminator_effect`].
- fn handle_terminator(&self, terminator: &Terminator<'tcx>, state: &mut State<Self::Value>) {
+ fn handle_terminator<'mir>(
+ &self,
+ terminator: &'mir Terminator<'tcx>,
+ state: &mut State<Self::Value>,
+ ) -> TerminatorEdges<'mir, 'tcx> {
self.super_terminator(terminator, state)
}
- fn super_terminator(&self, terminator: &Terminator<'tcx>, state: &mut State<Self::Value>) {
+ fn super_terminator<'mir>(
+ &self,
+ terminator: &'mir Terminator<'tcx>,
+ state: &mut State<Self::Value>,
+ ) -> TerminatorEdges<'mir, 'tcx> {
match &terminator.kind {
TerminatorKind::Call { .. } | TerminatorKind::InlineAsm { .. } => {
// Effect is applied by `handle_call_return`.
@@ -258,8 +265,10 @@ pub trait ValueAnalysis<'tcx> {
// They would have an effect, but are not allowed in this phase.
bug!("encountered disallowed terminator");
}
+ TerminatorKind::SwitchInt { discr, targets } => {
+ return self.handle_switch_int(discr, targets, state);
+ }
TerminatorKind::Goto { .. }
- | TerminatorKind::SwitchInt { .. }
| TerminatorKind::Resume
| TerminatorKind::Terminate
| TerminatorKind::Return
@@ -271,6 +280,7 @@ pub trait ValueAnalysis<'tcx> {
// These terminators have no effect on the analysis.
}
}
+ terminator.edges()
}
fn handle_call_return(
@@ -291,19 +301,22 @@ pub trait ValueAnalysis<'tcx> {
})
}
- fn handle_switch_int(
+ fn handle_switch_int<'mir>(
&self,
- discr: &Operand<'tcx>,
- apply_edge_effects: &mut impl SwitchIntEdgeEffects<State<Self::Value>>,
- ) {
- self.super_switch_int(discr, apply_edge_effects)
+ discr: &'mir Operand<'tcx>,
+ targets: &'mir SwitchTargets,
+ state: &mut State<Self::Value>,
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ self.super_switch_int(discr, targets, state)
}
- fn super_switch_int(
+ fn super_switch_int<'mir>(
&self,
- _discr: &Operand<'tcx>,
- _apply_edge_effects: &mut impl SwitchIntEdgeEffects<State<Self::Value>>,
- ) {
+ discr: &'mir Operand<'tcx>,
+ targets: &'mir SwitchTargets,
+ _state: &mut State<Self::Value>,
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ TerminatorEdges::SwitchInt { discr, targets }
}
fn wrap(self) -> ValueAnalysisWrapper<Self>
@@ -353,14 +366,16 @@ where
}
}
- fn apply_terminator_effect(
+ fn apply_terminator_effect<'mir>(
&mut self,
state: &mut Self::Domain,
- terminator: &Terminator<'tcx>,
+ terminator: &'mir Terminator<'tcx>,
_location: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
if state.is_reachable() {
- self.0.handle_terminator(terminator, state);
+ self.0.handle_terminator(terminator, state)
+ } else {
+ TerminatorEdges::None
}
}
@@ -368,7 +383,7 @@ where
&mut self,
state: &mut Self::Domain,
_block: BasicBlock,
- return_places: crate::CallReturnPlaces<'_, 'tcx>,
+ return_places: CallReturnPlaces<'_, 'tcx>,
) {
if state.is_reachable() {
self.0.handle_call_return(return_places, state)
@@ -378,11 +393,9 @@ where
fn apply_switch_int_edge_effects(
&mut self,
_block: BasicBlock,
- discr: &Operand<'tcx>,
- apply_edge_effects: &mut impl SwitchIntEdgeEffects<Self::Domain>,
+ _discr: &Operand<'tcx>,
+ _apply_edge_effects: &mut impl SwitchIntEdgeEffects<Self::Domain>,
) {
- // FIXME: Dataflow framework provides no access to current state here.
- self.0.handle_switch_int(discr, apply_edge_effects)
}
}
@@ -839,7 +852,7 @@ impl Map {
tail_elem: Option<TrackElem>,
f: &mut impl FnMut(ValueIndex),
) {
- if place.has_deref() {
+ if place.is_indirect_first_projection() {
// We do not track indirect places.
return;
}
@@ -999,14 +1012,14 @@ pub fn iter_fields<'tcx>(
f(None, field.into(), ty);
}
}
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
if def.is_union() {
return;
}
for (v_index, v_def) in def.variants().iter_enumerated() {
let variant = if def.is_struct() { None } else { Some(v_index) };
for (f_index, f_def) in v_def.fields.iter().enumerate() {
- let field_ty = f_def.ty(tcx, substs);
+ let field_ty = f_def.ty(tcx, args);
let field_ty = tcx
.try_normalize_erasing_regions(param_env, field_ty)
.unwrap_or_else(|_| tcx.erase_regions(field_ty));
@@ -1014,8 +1027,8 @@ pub fn iter_fields<'tcx>(
}
}
}
- ty::Closure(_, substs) => {
- iter_fields(substs.as_closure().tupled_upvars_ty(), tcx, param_env, f);
+ ty::Closure(_, args) => {
+ iter_fields(args.as_closure().tupled_upvars_ty(), tcx, param_env, f);
}
_ => (),
}
@@ -1099,10 +1112,10 @@ fn debug_with_context_rec<V: Debug + Eq>(
let info_elem = map.places[child].proj_elem.unwrap();
let child_place_str = match info_elem {
TrackElem::Discriminant => {
- format!("discriminant({})", place_str)
+ format!("discriminant({place_str})")
}
TrackElem::Variant(idx) => {
- format!("({} as {:?})", place_str, idx)
+ format!("({place_str} as {idx:?})")
}
TrackElem::Field(field) => {
if place_str.starts_with('*') {
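
The `ValueAnalysis` changes above make `handle_terminator`/`super_terminator` return `TerminatorEdges` and route `SwitchInt` through `handle_switch_int(discr, targets, state)`, so the handler can see the current state (the removed FIXME noted the old edge-effects callback could not). A minimal sketch of that shape, with simplified stand-in types rather than the real `TerminatorEdges`/`ValueAnalysis` definitions:

// Illustrative stand-ins, not rustc_middle's TerminatorEdges or MIR types.
#[derive(Debug)]
enum Edges<'a> {
    None,
    SwitchInt { targets: &'a [usize] },
}

enum Terminator {
    Return,
    SwitchInt { targets: Vec<usize> },
}

trait Analysis {
    type State;

    // The handler updates `state` and also tells the engine which outgoing
    // edges to propagate along, mirroring the new return type.
    fn handle_terminator<'t>(&self, term: &'t Terminator, _state: &mut Self::State) -> Edges<'t> {
        match term {
            Terminator::Return => Edges::None,
            Terminator::SwitchInt { targets } => Edges::SwitchInt { targets: targets.as_slice() },
        }
    }
}

struct NoopAnalysis;
impl Analysis for NoopAnalysis {
    type State = ();
}

fn main() {
    let switch = Terminator::SwitchInt { targets: vec![1, 2] };
    let ret = Terminator::Return;
    println!("{:?}", NoopAnalysis.handle_terminator(&switch, &mut ())); // SwitchInt { targets: [1, 2] }
    println!("{:?}", NoopAnalysis.handle_terminator(&ret, &mut ())); // None
}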
diff --git a/compiler/rustc_mir_transform/Cargo.toml b/compiler/rustc_mir_transform/Cargo.toml
index eca5f98a2..f1198d9bf 100644
--- a/compiler/rustc_mir_transform/Cargo.toml
+++ b/compiler/rustc_mir_transform/Cargo.toml
@@ -18,6 +18,7 @@ rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_middle = { path = "../rustc_middle" }
rustc_const_eval = { path = "../rustc_const_eval" }
+rustc_mir_build = { path = "../rustc_mir_build" }
rustc_mir_dataflow = { path = "../rustc_mir_dataflow" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
index d9e7339f1..75473ca53 100644
--- a/compiler/rustc_mir_transform/src/add_retag.rs
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -60,7 +60,7 @@ impl<'tcx> MirPass<'tcx> for AddRetag {
let basic_blocks = body.basic_blocks.as_mut();
let local_decls = &body.local_decls;
let needs_retag = |place: &Place<'tcx>| {
- !place.has_deref() // we're not really interested in stores to "outside" locations, they are hard to keep track of anyway
+ !place.is_indirect_first_projection() // we're not really interested in stores to "outside" locations, they are hard to keep track of anyway
&& may_contain_reference(place.ty(&*local_decls, tcx).ty, /*depth*/ 3, tcx)
&& !local_decls[place.local].is_deref_temp()
};
diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs
index 70812761e..58e9786ec 100644
--- a/compiler/rustc_mir_transform/src/check_unsafety.rs
+++ b/compiler/rustc_mir_transform/src/check_unsafety.rs
@@ -1,4 +1,4 @@
-use rustc_data_structures::unord::{UnordItems, UnordSet};
+use rustc_data_structures::unord::{ExtendUnord, UnordItems, UnordSet};
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 2f2c7357b..7529ed818 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -14,16 +14,15 @@ use rustc_middle::mir::visit::{
};
use rustc_middle::mir::*;
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
-use rustc_middle::ty::InternalSubsts;
-use rustc_middle::ty::{self, ConstKind, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{self, GenericArgs, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::{def_id::DefId, Span, DUMMY_SP};
use rustc_target::abi::{self, Align, HasDataLayout, Size, TargetDataLayout};
use rustc_target::spec::abi::Abi as CallAbi;
use crate::MirPass;
use rustc_const_eval::interpret::{
- self, compile_time_machine, AllocId, ConstAllocation, ConstValue, Frame, ImmTy, Immediate,
- InterpCx, InterpResult, LocalValue, MemoryKind, OpTy, PlaceTy, Pointer, Scalar,
+ self, compile_time_machine, AllocId, ConstAllocation, ConstValue, FnArg, Frame, ImmTy,
+ Immediate, InterpCx, InterpResult, LocalValue, MemoryKind, OpTy, PlaceTy, Pointer, Scalar,
StackPopCleanup,
};
@@ -87,7 +86,7 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
return;
}
- let is_generator = tcx.type_of(def_id.to_def_id()).subst_identity().is_generator();
+ let is_generator = tcx.type_of(def_id.to_def_id()).instantiate_identity().is_generator();
// FIXME(welseywiser) const prop doesn't work on generators because of query cycles
// computing their layout.
if is_generator {
@@ -185,7 +184,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
_abi: CallAbi,
- _args: &[OpTy<'tcx>],
+ _args: &[FnArg<'tcx>],
_destination: &PlaceTy<'tcx>,
_target: Option<BasicBlock>,
_unwind: UnwindAction,
@@ -338,7 +337,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
tcx: TyCtxt<'tcx>,
) -> ConstPropagator<'mir, 'tcx> {
let def_id = body.source.def_id();
- let substs = &InternalSubsts::identity_for_item(tcx, def_id);
+ let args = &GenericArgs::identity_for_item(tcx, def_id);
let param_env = tcx.param_env_reveal_all_normalized(def_id);
let can_const_prop = CanConstProp::check(tcx, param_env, body);
@@ -350,7 +349,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
);
let ret_layout = ecx
- .layout_of(body.bound_return_ty().subst(tcx, substs))
+ .layout_of(body.bound_return_ty().instantiate(tcx, args))
.ok()
// Don't bother allocating memory for large values.
// I don't know how return types can seem to be unsized but this happens in the
@@ -366,7 +365,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
.into();
ecx.push_stack_frame(
- Instance::new(def_id, substs),
+ Instance::new(def_id, args),
dummy_body,
&ret,
StackPopCleanup::Root { cleanup: false },
@@ -407,51 +406,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
ecx.machine.written_only_inside_own_block_locals.remove(&local);
}
- /// Returns the value, if any, of evaluating `c`.
- fn eval_constant(&mut self, c: &Constant<'tcx>) -> Option<OpTy<'tcx>> {
- // FIXME we need to revisit this for #67176
- if c.has_param() {
- return None;
- }
-
- // No span, we don't want errors to be shown.
- self.ecx.eval_mir_constant(&c.literal, None, None).ok()
- }
-
- /// Returns the value, if any, of evaluating `place`.
- fn eval_place(&mut self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
- trace!("eval_place(place={:?})", place);
- self.ecx.eval_place_to_op(place, None).ok()
- }
-
- /// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant`
- /// or `eval_place`, depending on the variant of `Operand` used.
- fn eval_operand(&mut self, op: &Operand<'tcx>) -> Option<OpTy<'tcx>> {
- match *op {
- Operand::Constant(ref c) => self.eval_constant(c),
- Operand::Move(place) | Operand::Copy(place) => self.eval_place(place),
- }
- }
-
fn propagate_operand(&mut self, operand: &mut Operand<'tcx>) {
- match *operand {
- Operand::Copy(l) | Operand::Move(l) => {
- if let Some(value) = self.get_const(l) && self.should_const_prop(&value) {
- // FIXME(felix91gr): this code only handles `Scalar` cases.
- // For now, we're not handling `ScalarPair` cases because
- // doing so here would require a lot of code duplication.
- // We should hopefully generalize `Operand` handling into a fn,
- // and use it to do const-prop here and everywhere else
- // where it makes sense.
- if let interpret::Operand::Immediate(interpret::Immediate::Scalar(
- scalar,
- )) = *value
- {
- *operand = self.operand_from_scalar(scalar, value.layout.ty);
- }
- }
- }
- Operand::Constant(_) => (),
+ if let Some(place) = operand.place() && let Some(op) = self.replace_with_const(place) {
+ *operand = op;
}
}
@@ -579,93 +536,45 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}))
}
- fn replace_with_const(&mut self, place: Place<'tcx>, rval: &mut Rvalue<'tcx>) {
+ fn replace_with_const(&mut self, place: Place<'tcx>) -> Option<Operand<'tcx>> {
// This will return None if the above `const_prop` invocation only "wrote" a
// type whose creation requires no write. E.g. a generator whose initial state
// consists solely of uninitialized memory (so it doesn't capture any locals).
- let Some(ref value) = self.get_const(place) else { return };
- if !self.should_const_prop(value) {
- return;
- }
- trace!("replacing {:?}={:?} with {:?}", place, rval, value);
-
- if let Rvalue::Use(Operand::Constant(c)) = rval {
- match c.literal {
- ConstantKind::Ty(c) if matches!(c.kind(), ConstKind::Unevaluated(..)) => {}
- _ => {
- trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
- return;
- }
- }
+ let value = self.get_const(place)?;
+ if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - {value:?}")) {
+ return None;
}
+ trace!("replacing {:?} with {:?}", place, value);
- trace!("attempting to replace {:?} with {:?}", rval, value);
// FIXME> figure out what to do when read_immediate_raw fails
- let imm = self.ecx.read_immediate_raw(value).ok();
+ let imm = self.ecx.read_immediate_raw(&value).ok()?;
- if let Some(Right(imm)) = imm {
- match *imm {
- interpret::Immediate::Scalar(scalar) => {
- *rval = Rvalue::Use(self.operand_from_scalar(scalar, value.layout.ty));
- }
- Immediate::ScalarPair(..) => {
- // Found a value represented as a pair. For now only do const-prop if the type
- // of `rvalue` is also a tuple with two scalars.
- // FIXME: enable the general case stated above ^.
- let ty = value.layout.ty;
- // Only do it for tuples
- if let ty::Tuple(types) = ty.kind() {
- // Only do it if tuple is also a pair with two scalars
- if let [ty1, ty2] = types[..] {
- let ty_is_scalar = |ty| {
- self.ecx.layout_of(ty).ok().map(|layout| layout.abi.is_scalar())
- == Some(true)
- };
- let alloc = if ty_is_scalar(ty1) && ty_is_scalar(ty2) {
- let alloc = self
- .ecx
- .intern_with_temp_alloc(value.layout, |ecx, dest| {
- ecx.write_immediate(*imm, dest)
- })
- .unwrap();
- Some(alloc)
- } else {
- None
- };
-
- if let Some(alloc) = alloc {
- // Assign entire constant in a single statement.
- // We can't use aggregates, as we run after the aggregate-lowering `MirPhase`.
- let const_val = ConstValue::ByRef { alloc, offset: Size::ZERO };
- let literal = ConstantKind::Val(const_val, ty);
- *rval = Rvalue::Use(Operand::Constant(Box::new(Constant {
- span: DUMMY_SP,
- user_ty: None,
- literal,
- })));
- }
- }
- }
- }
- // Scalars or scalar pairs that contain undef values are assumed to not have
- // successfully evaluated and are thus not propagated.
- _ => {}
+ let Right(imm) = imm else { return None };
+ match *imm {
+ Immediate::Scalar(scalar) if scalar.try_to_int().is_ok() => {
+ Some(self.operand_from_scalar(scalar, value.layout.ty))
}
- }
- }
-
- /// Returns `true` if and only if this `op` should be const-propagated into.
- fn should_const_prop(&mut self, op: &OpTy<'tcx>) -> bool {
- if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - OpTy: {:?}", op)) {
- return false;
- }
-
- match **op {
- interpret::Operand::Immediate(Immediate::Scalar(s)) => s.try_to_int().is_ok(),
- interpret::Operand::Immediate(Immediate::ScalarPair(l, r)) => {
- l.try_to_int().is_ok() && r.try_to_int().is_ok()
+ Immediate::ScalarPair(l, r) if l.try_to_int().is_ok() && r.try_to_int().is_ok() => {
+ let alloc = self
+ .ecx
+ .intern_with_temp_alloc(value.layout, |ecx, dest| {
+ ecx.write_immediate(*imm, dest)
+ })
+ .ok()?;
+
+ let literal = ConstantKind::Val(
+ ConstValue::ByRef { alloc, offset: Size::ZERO },
+ value.layout.ty,
+ );
+ Some(Operand::Constant(Box::new(Constant {
+ span: DUMMY_SP,
+ user_ty: None,
+ literal,
+ })))
}
- _ => false,
+ // Scalars or scalar pairs that contain undef values are assumed to not have
+ // successfully evaluated and are thus not propagated.
+ _ => None,
}
}
@@ -810,12 +719,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
self.super_operand(operand, location);
-
- // Only const prop copies and moves on `mir_opt_level=3` as doing so
- // currently slightly increases compile time in some cases.
- if self.tcx.sess.mir_opt_level() >= 3 {
- self.propagate_operand(operand)
- }
+ self.propagate_operand(operand)
}
fn process_projection_elem(
@@ -825,8 +729,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
) -> Option<PlaceElem<'tcx>> {
if let PlaceElem::Index(local) = elem
&& let Some(value) = self.get_const(local.into())
- && self.should_const_prop(&value)
- && let interpret::Operand::Immediate(interpret::Immediate::Scalar(scalar)) = *value
+ && let interpret::Operand::Immediate(Immediate::Scalar(scalar)) = *value
&& let Ok(offset) = scalar.to_target_usize(&self.tcx)
&& let Some(min_length) = offset.checked_add(1)
{
@@ -852,7 +755,14 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local),
ConstPropMode::OnlyInsideOwnBlock | ConstPropMode::FullConstProp => {
if let Some(()) = self.eval_rvalue_with_identities(rvalue, *place) {
- self.replace_with_const(*place, rvalue);
+ // If this was already an evaluated constant, keep it.
+ if let Rvalue::Use(Operand::Constant(c)) = rvalue
+ && let ConstantKind::Val(..) = c.literal
+ {
+ trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
+ } else if let Some(operand) = self.replace_with_const(*place) {
+ *rvalue = Rvalue::Use(operand);
+ }
} else {
// Const prop failed, so erase the destination, ensuring that whatever happens
// from here on, does not know about the previous value.
@@ -919,45 +829,6 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
}
}
- fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
- self.super_terminator(terminator, location);
-
- match &mut terminator.kind {
- TerminatorKind::Assert { expected, ref mut cond, .. } => {
- if let Some(ref value) = self.eval_operand(&cond)
- && let Ok(value_const) = self.ecx.read_scalar(&value)
- && self.should_const_prop(value)
- {
- trace!("assertion on {:?} should be {:?}", value, expected);
- *cond = self.operand_from_scalar(value_const, self.tcx.types.bool);
- }
- }
- TerminatorKind::SwitchInt { ref mut discr, .. } => {
- // FIXME: This is currently redundant with `visit_operand`, but sadly
- // always visiting operands currently causes a perf regression in LLVM codegen, so
- // `visit_operand` currently only runs for propagates places for `mir_opt_level=4`.
- self.propagate_operand(discr)
- }
- // None of these have Operands to const-propagate.
- TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
- | TerminatorKind::Return
- | TerminatorKind::Unreachable
- | TerminatorKind::Drop { .. }
- | TerminatorKind::Yield { .. }
- | TerminatorKind::GeneratorDrop
- | TerminatorKind::FalseEdge { .. }
- | TerminatorKind::FalseUnwind { .. }
- | TerminatorKind::InlineAsm { .. } => {}
- // Every argument in our function calls have already been propagated in `visit_operand`.
- //
- // NOTE: because LLVM codegen gives slight performance regressions with it, so this is
- // gated on `mir_opt_level=3`.
- TerminatorKind::Call { .. } => {}
- }
- }
-
fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
self.super_basic_block_data(block, data);
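
In the reworked const-prop pass, `replace_with_const` returns an `Operand` only when the evaluated place is a fully-known scalar or scalar pair, and its callers decide where to splice the result in (as an operand in `propagate_operand`, or as an `Rvalue::Use` at the assignment site). A toy, self-contained analogue of that evaluate-then-maybe-replace step, using a made-up mini operand type rather than MIR:

use std::collections::HashMap;

// Toy operands: either an already-known constant or a use of a named local.
#[derive(Debug, PartialEq)]
enum Operand {
    Const(i64),
    Local(&'static str),
}

// Shaped like `replace_with_const`: Some(constant operand) only when the
// value is fully known, None otherwise.
fn replace_with_const(known: &HashMap<&'static str, i64>, op: &Operand) -> Option<Operand> {
    match op {
        Operand::Const(_) => None, // already a constant, nothing to do
        Operand::Local(name) => known.get(name).map(|&v| Operand::Const(v)),
    }
}

// Shaped like `propagate_operand`: splice the constant in if we got one.
fn propagate(known: &HashMap<&'static str, i64>, op: &mut Operand) {
    if let Some(constant) = replace_with_const(known, op) {
        *op = constant;
    }
}

fn main() {
    let known = HashMap::from([("_2", 7)]);
    let mut a = Operand::Local("_2");
    let mut b = Operand::Local("_3");
    propagate(&known, &mut a);
    propagate(&known, &mut b);
    assert_eq!(a, Operand::Const(7));
    assert_eq!(b, Operand::Local("_3")); // unknown values are left untouched
}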
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index 759650fe4..ac07c2576 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -16,7 +16,7 @@ use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
-use rustc_middle::ty::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{
self, ConstInt, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt,
};
@@ -55,7 +55,7 @@ impl<'tcx> MirLint<'tcx> for ConstProp {
return;
}
- let is_generator = tcx.type_of(def_id.to_def_id()).subst_identity().is_generator();
+ let is_generator = tcx.type_of(def_id.to_def_id()).instantiate_identity().is_generator();
// FIXME(welseywiser) const prop doesn't work on generators because of query cycles
// computing their layout.
if is_generator {
@@ -171,7 +171,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
tcx: TyCtxt<'tcx>,
) -> ConstPropagator<'mir, 'tcx> {
let def_id = body.source.def_id();
- let substs = &InternalSubsts::identity_for_item(tcx, def_id);
+ let args = &GenericArgs::identity_for_item(tcx, def_id);
let param_env = tcx.param_env_reveal_all_normalized(def_id);
let can_const_prop = CanConstProp::check(tcx, param_env, body);
@@ -183,7 +183,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
);
let ret_layout = ecx
- .layout_of(body.bound_return_ty().subst(tcx, substs))
+ .layout_of(body.bound_return_ty().instantiate(tcx, args))
.ok()
// Don't bother allocating memory for large values.
// I don't know how return types can seem to be unsized but this happens in the
@@ -199,7 +199,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
.into();
ecx.push_stack_frame(
- Instance::new(def_id, substs),
+ Instance::new(def_id, args),
dummy_body,
&ret,
StackPopCleanup::Root { cleanup: false },
@@ -494,7 +494,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
trace!("assertion on {:?} should be {:?}", value, expected);
let expected = Scalar::from_bool(expected);
- let value_const = self.use_ecx(location, |this| this.ecx.read_scalar(&value))?;
+ let value_const = self.use_ecx(location, |this| this.ecx.read_scalar(value))?;
if expected != value_const {
// Poison all places this operand references so that further code
@@ -664,7 +664,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
}
TerminatorKind::SwitchInt { ref discr, ref targets } => {
if let Some(ref value) = self.eval_operand(&discr, location)
- && let Some(value_const) = self.use_ecx(location, |this| this.ecx.read_scalar(&value))
+ && let Some(value_const) = self.use_ecx(location, |this| this.ecx.read_scalar(value))
&& let Ok(constant) = value_const.try_to_int()
&& let Ok(constant) = constant.to_bits(constant.size())
{
diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs
index 3df459dfa..9a3798eea 100644
--- a/compiler/rustc_mir_transform/src/copy_prop.rs
+++ b/compiler/rustc_mir_transform/src/copy_prop.rs
@@ -76,9 +76,11 @@ fn fully_moved_locals(ssa: &SsaLocals, body: &Body<'_>) -> BitSet<Local> {
let mut fully_moved = BitSet::new_filled(body.local_decls.len());
for (_, rvalue, _) in ssa.assignments(body) {
- let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place)) | Rvalue::CopyForDeref(place))
- = rvalue
- else { continue };
+ let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place))
+ | Rvalue::CopyForDeref(place)) = rvalue
+ else {
+ continue;
+ };
let Some(rhs) = place.as_local() else { continue };
if !ssa.is_ssa(rhs) {
@@ -152,7 +154,7 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
fn visit_operand(&mut self, operand: &mut Operand<'tcx>, loc: Location) {
if let Operand::Move(place) = *operand
// A move out of a projection of a copy is equivalent to a copy of the original projection.
- && !place.has_deref()
+ && !place.is_indirect_first_projection()
&& !self.fully_moved.contains(place.local)
{
*operand = Operand::Copy(place);
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index 658e01d93..3d442e5dc 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -8,55 +8,116 @@ use debug::{DebugCounters, NESTED_INDENT};
use graph::{BasicCoverageBlock, BcbBranch, CoverageGraph, TraverseCoverageGraphWithLoops};
use spans::CoverageSpan;
+use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::WithNumNodes;
use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
use rustc_middle::mir::coverage::*;
-/// Manages the counter and expression indexes/IDs to generate `CoverageKind` components for MIR
-/// `Coverage` statements.
+use std::fmt::{self, Debug};
+
+/// The coverage counter or counter expression associated with a particular
+/// BCB node or BCB edge.
+#[derive(Clone)]
+pub(super) enum BcbCounter {
+ Counter { id: CounterId },
+ Expression { id: ExpressionId, lhs: Operand, op: Op, rhs: Operand },
+}
+
+impl BcbCounter {
+ fn is_expression(&self) -> bool {
+ matches!(self, Self::Expression { .. })
+ }
+
+ pub(super) fn as_operand(&self) -> Operand {
+ match *self {
+ BcbCounter::Counter { id, .. } => Operand::Counter(id),
+ BcbCounter::Expression { id, .. } => Operand::Expression(id),
+ }
+ }
+}
+
+impl Debug for BcbCounter {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Counter { id, .. } => write!(fmt, "Counter({:?})", id.index()),
+ Self::Expression { id, lhs, op, rhs } => write!(
+ fmt,
+ "Expression({:?}) = {:?} {} {:?}",
+ id.index(),
+ lhs,
+ match op {
+ Op::Add => "+",
+ Op::Subtract => "-",
+ },
+ rhs,
+ ),
+ }
+ }
+}
+
+/// Generates and stores coverage counter and coverage expression information
+/// associated with nodes/edges in the BCB graph.
pub(super) struct CoverageCounters {
- function_source_hash: u64,
- next_counter_id: u32,
- num_expressions: u32,
+ next_counter_id: CounterId,
+ next_expression_id: ExpressionId,
+
+ /// Coverage counters/expressions that are associated with individual BCBs.
+ bcb_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>,
+ /// Coverage counters/expressions that are associated with the control-flow
+ /// edge between two BCBs.
+ bcb_edge_counters: FxHashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>,
+ /// Tracks which BCBs have a counter associated with some incoming edge.
+ /// Only used by debug assertions, to verify that BCBs with incoming edge
+ /// counters do not have their own physical counters (expressions are allowed).
+ bcb_has_incoming_edge_counters: BitSet<BasicCoverageBlock>,
+ /// Expression nodes that are not directly associated with any particular
+ /// BCB/edge, but are needed as operands to more complex expressions.
+ /// These are always [`BcbCounter::Expression`].
+ pub(super) intermediate_expressions: Vec<BcbCounter>,
+
pub debug_counters: DebugCounters,
}
impl CoverageCounters {
- pub fn new(function_source_hash: u64) -> Self {
+ pub(super) fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
+ let num_bcbs = basic_coverage_blocks.num_nodes();
+
Self {
- function_source_hash,
- next_counter_id: CounterValueReference::START.as_u32(),
- num_expressions: 0,
+ next_counter_id: CounterId::START,
+ next_expression_id: ExpressionId::START,
+
+ bcb_counters: IndexVec::from_elem_n(None, num_bcbs),
+ bcb_edge_counters: FxHashMap::default(),
+ bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs),
+ intermediate_expressions: Vec::new(),
+
debug_counters: DebugCounters::new(),
}
}
/// Activate the `DebugCounters` data structures, to provide additional debug formatting
- /// features when formatting `CoverageKind` (counter) values.
+ /// features when formatting [`BcbCounter`] (counter) values.
pub fn enable_debug(&mut self) {
self.debug_counters.enable();
}
- /// Makes `CoverageKind` `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
- /// indirectly associated with `CoverageSpans`, and returns additional `Expression`s
+ /// Makes [`BcbCounter`] `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
+ /// indirectly associated with `CoverageSpans`, and accumulates additional `Expression`s
/// representing intermediate values.
pub fn make_bcb_counters(
&mut self,
- basic_coverage_blocks: &mut CoverageGraph,
+ basic_coverage_blocks: &CoverageGraph,
coverage_spans: &[CoverageSpan],
- ) -> Result<Vec<CoverageKind>, Error> {
- let mut bcb_counters = BcbCounters::new(self, basic_coverage_blocks);
- bcb_counters.make_bcb_counters(coverage_spans)
+ ) -> Result<(), Error> {
+ MakeBcbCounters::new(self, basic_coverage_blocks).make_bcb_counters(coverage_spans)
}
- fn make_counter<F>(&mut self, debug_block_label_fn: F) -> CoverageKind
+ fn make_counter<F>(&mut self, debug_block_label_fn: F) -> BcbCounter
where
F: Fn() -> Option<String>,
{
- let counter = CoverageKind::Counter {
- function_source_hash: self.function_source_hash,
- id: self.next_counter(),
- };
+ let counter = BcbCounter::Counter { id: self.next_counter() };
if self.debug_counters.is_enabled() {
self.debug_counters.add_counter(&counter, (debug_block_label_fn)());
}
@@ -65,49 +126,120 @@ impl CoverageCounters {
fn make_expression<F>(
&mut self,
- lhs: ExpressionOperandId,
+ lhs: Operand,
op: Op,
- rhs: ExpressionOperandId,
+ rhs: Operand,
debug_block_label_fn: F,
- ) -> CoverageKind
+ ) -> BcbCounter
where
F: Fn() -> Option<String>,
{
let id = self.next_expression();
- let expression = CoverageKind::Expression { id, lhs, op, rhs };
+ let expression = BcbCounter::Expression { id, lhs, op, rhs };
if self.debug_counters.is_enabled() {
self.debug_counters.add_counter(&expression, (debug_block_label_fn)());
}
expression
}
- pub fn make_identity_counter(&mut self, counter_operand: ExpressionOperandId) -> CoverageKind {
+ pub fn make_identity_counter(&mut self, counter_operand: Operand) -> BcbCounter {
let some_debug_block_label = if self.debug_counters.is_enabled() {
self.debug_counters.some_block_label(counter_operand).cloned()
} else {
None
};
- self.make_expression(counter_operand, Op::Add, ExpressionOperandId::ZERO, || {
+ self.make_expression(counter_operand, Op::Add, Operand::Zero, || {
some_debug_block_label.clone()
})
}
/// Counter IDs start from one and go up.
- fn next_counter(&mut self) -> CounterValueReference {
- assert!(self.next_counter_id < u32::MAX - self.num_expressions);
+ fn next_counter(&mut self) -> CounterId {
let next = self.next_counter_id;
- self.next_counter_id += 1;
- CounterValueReference::from(next)
+ self.next_counter_id = next.next_id();
+ next
+ }
+
+ /// Expression IDs start from 0 and go up.
+ /// (Counter IDs and Expression IDs are distinguished by the `Operand` enum.)
+ fn next_expression(&mut self) -> ExpressionId {
+ let next = self.next_expression_id;
+ self.next_expression_id = next.next_id();
+ next
+ }
+
+ fn set_bcb_counter(
+ &mut self,
+ bcb: BasicCoverageBlock,
+ counter_kind: BcbCounter,
+ ) -> Result<Operand, Error> {
+ debug_assert!(
+ // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
+ // have an expression (to be injected into an existing `BasicBlock` represented by this
+ // `BasicCoverageBlock`).
+ counter_kind.is_expression() || !self.bcb_has_incoming_edge_counters.contains(bcb),
+ "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
+ );
+ let operand = counter_kind.as_operand();
+ if let Some(replaced) = self.bcb_counters[bcb].replace(counter_kind) {
+ Error::from_string(format!(
+ "attempt to set a BasicCoverageBlock coverage counter more than once; \
+ {bcb:?} already had counter {replaced:?}",
+ ))
+ } else {
+ Ok(operand)
+ }
+ }
+
+ fn set_bcb_edge_counter(
+ &mut self,
+ from_bcb: BasicCoverageBlock,
+ to_bcb: BasicCoverageBlock,
+ counter_kind: BcbCounter,
+ ) -> Result<Operand, Error> {
+ if level_enabled!(tracing::Level::DEBUG) {
+ // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
+ // have an expression (to be injected into an existing `BasicBlock` represented by this
+ // `BasicCoverageBlock`).
+ if self.bcb_counter(to_bcb).is_some_and(|c| !c.is_expression()) {
+ return Error::from_string(format!(
+ "attempt to add an incoming edge counter from {from_bcb:?} when the target BCB already \
+ has a `Counter`"
+ ));
+ }
+ }
+ self.bcb_has_incoming_edge_counters.insert(to_bcb);
+ let operand = counter_kind.as_operand();
+ if let Some(replaced) = self.bcb_edge_counters.insert((from_bcb, to_bcb), counter_kind) {
+ Error::from_string(format!(
+ "attempt to set an edge counter more than once; from_bcb: \
+ {from_bcb:?} already had counter {replaced:?}",
+ ))
+ } else {
+ Ok(operand)
+ }
}
- /// Expression IDs start from u32::MAX and go down because an Expression can reference
- /// (add or subtract counts) of both Counter regions and Expression regions. The counter
- /// expression operand IDs must be unique across both types.
- fn next_expression(&mut self) -> InjectedExpressionId {
- assert!(self.next_counter_id < u32::MAX - self.num_expressions);
- let next = u32::MAX - self.num_expressions;
- self.num_expressions += 1;
- InjectedExpressionId::from(next)
+ pub(super) fn bcb_counter(&self, bcb: BasicCoverageBlock) -> Option<&BcbCounter> {
+ self.bcb_counters[bcb].as_ref()
+ }
+
+ pub(super) fn take_bcb_counter(&mut self, bcb: BasicCoverageBlock) -> Option<BcbCounter> {
+ self.bcb_counters[bcb].take()
+ }
+
+ pub(super) fn drain_bcb_counters(
+ &mut self,
+ ) -> impl Iterator<Item = (BasicCoverageBlock, BcbCounter)> + '_ {
+ self.bcb_counters
+ .iter_enumerated_mut()
+ .filter_map(|(bcb, counter)| Some((bcb, counter.take()?)))
+ }
+
+ pub(super) fn drain_bcb_edge_counters(
+ &mut self,
+ ) -> impl Iterator<Item = ((BasicCoverageBlock, BasicCoverageBlock), BcbCounter)> + '_ {
+ self.bcb_edge_counters.drain()
}
}
@@ -115,15 +247,15 @@ impl CoverageCounters {
/// injected with `CoverageSpan`s. `Expressions` have no runtime overhead, so if a viable expression
/// (adding or subtracting two other counters or expressions) can compute the same result as an
/// embedded counter, an `Expression` should be used.
-struct BcbCounters<'a> {
+struct MakeBcbCounters<'a> {
coverage_counters: &'a mut CoverageCounters,
- basic_coverage_blocks: &'a mut CoverageGraph,
+ basic_coverage_blocks: &'a CoverageGraph,
}
-impl<'a> BcbCounters<'a> {
+impl<'a> MakeBcbCounters<'a> {
fn new(
coverage_counters: &'a mut CoverageCounters,
- basic_coverage_blocks: &'a mut CoverageGraph,
+ basic_coverage_blocks: &'a CoverageGraph,
) -> Self {
Self { coverage_counters, basic_coverage_blocks }
}
@@ -138,13 +270,9 @@ impl<'a> BcbCounters<'a> {
/// Returns any non-code-span expressions created to represent intermediate values (such as to
/// add two counters so the result can be subtracted from another counter), or an Error with
/// message for subsequent debugging.
- fn make_bcb_counters(
- &mut self,
- coverage_spans: &[CoverageSpan],
- ) -> Result<Vec<CoverageKind>, Error> {
+ fn make_bcb_counters(&mut self, coverage_spans: &[CoverageSpan]) -> Result<(), Error> {
debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock");
let num_bcbs = self.basic_coverage_blocks.num_nodes();
- let mut collect_intermediate_expressions = Vec::with_capacity(num_bcbs);
let mut bcbs_with_coverage = BitSet::new_empty(num_bcbs);
for covspan in coverage_spans {
@@ -165,16 +293,10 @@ impl<'a> BcbCounters<'a> {
while let Some(bcb) = traversal.next(self.basic_coverage_blocks) {
if bcbs_with_coverage.contains(bcb) {
debug!("{:?} has at least one `CoverageSpan`. Get or make its counter", bcb);
- let branching_counter_operand =
- self.get_or_make_counter_operand(bcb, &mut collect_intermediate_expressions)?;
+ let branching_counter_operand = self.get_or_make_counter_operand(bcb)?;
if self.bcb_needs_branch_counters(bcb) {
- self.make_branch_counters(
- &mut traversal,
- bcb,
- branching_counter_operand,
- &mut collect_intermediate_expressions,
- )?;
+ self.make_branch_counters(&mut traversal, bcb, branching_counter_operand)?;
}
} else {
debug!(
@@ -186,7 +308,7 @@ impl<'a> BcbCounters<'a> {
}
if traversal.is_complete() {
- Ok(collect_intermediate_expressions)
+ Ok(())
} else {
Error::from_string(format!(
"`TraverseCoverageGraphWithLoops` missed some `BasicCoverageBlock`s: {:?}",
@@ -199,8 +321,7 @@ impl<'a> BcbCounters<'a> {
&mut self,
traversal: &mut TraverseCoverageGraphWithLoops,
branching_bcb: BasicCoverageBlock,
- branching_counter_operand: ExpressionOperandId,
- collect_intermediate_expressions: &mut Vec<CoverageKind>,
+ branching_counter_operand: Operand,
) -> Result<(), Error> {
let branches = self.bcb_branches(branching_bcb);
debug!(
@@ -208,9 +329,7 @@ impl<'a> BcbCounters<'a> {
branching_bcb,
branches
.iter()
- .map(|branch| {
- format!("{:?}: {:?}", branch, branch.counter(&self.basic_coverage_blocks))
- })
+ .map(|branch| { format!("{:?}: {:?}", branch, self.branch_counter(branch)) })
.collect::<Vec<_>>()
.join("\n "),
);
@@ -236,17 +355,10 @@ impl<'a> BcbCounters<'a> {
counter",
branch, branching_bcb
);
- self.get_or_make_counter_operand(
- branch.target_bcb,
- collect_intermediate_expressions,
- )?
+ self.get_or_make_counter_operand(branch.target_bcb)?
} else {
debug!(" {:?} has multiple incoming edges, so adding an edge counter", branch);
- self.get_or_make_edge_counter_operand(
- branching_bcb,
- branch.target_bcb,
- collect_intermediate_expressions,
- )?
+ self.get_or_make_edge_counter_operand(branching_bcb, branch.target_bcb)?
};
if let Some(sumup_counter_operand) =
some_sumup_counter_operand.replace(branch_counter_operand)
@@ -261,8 +373,8 @@ impl<'a> BcbCounters<'a> {
" [new intermediate expression: {}]",
self.format_counter(&intermediate_expression)
);
- let intermediate_expression_operand = intermediate_expression.as_operand_id();
- collect_intermediate_expressions.push(intermediate_expression);
+ let intermediate_expression_operand = intermediate_expression.as_operand();
+ self.coverage_counters.intermediate_expressions.push(intermediate_expression);
some_sumup_counter_operand.replace(intermediate_expression_operand);
}
}
@@ -282,41 +394,36 @@ impl<'a> BcbCounters<'a> {
branching_counter_operand,
Op::Subtract,
sumup_counter_operand,
- || Some(format!("{:?}", expression_branch)),
+ || Some(format!("{expression_branch:?}")),
);
debug!("{:?} gets an expression: {}", expression_branch, self.format_counter(&expression));
let bcb = expression_branch.target_bcb;
if expression_branch.is_only_path_to_target() {
- self.basic_coverage_blocks[bcb].set_counter(expression)?;
+ self.coverage_counters.set_bcb_counter(bcb, expression)?;
} else {
- self.basic_coverage_blocks[bcb].set_edge_counter_from(branching_bcb, expression)?;
+ self.coverage_counters.set_bcb_edge_counter(branching_bcb, bcb, expression)?;
}
Ok(())
}
- fn get_or_make_counter_operand(
- &mut self,
- bcb: BasicCoverageBlock,
- collect_intermediate_expressions: &mut Vec<CoverageKind>,
- ) -> Result<ExpressionOperandId, Error> {
- self.recursive_get_or_make_counter_operand(bcb, collect_intermediate_expressions, 1)
+ fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> Result<Operand, Error> {
+ self.recursive_get_or_make_counter_operand(bcb, 1)
}
fn recursive_get_or_make_counter_operand(
&mut self,
bcb: BasicCoverageBlock,
- collect_intermediate_expressions: &mut Vec<CoverageKind>,
debug_indent_level: usize,
- ) -> Result<ExpressionOperandId, Error> {
+ ) -> Result<Operand, Error> {
// If the BCB already has a counter, return it.
- if let Some(counter_kind) = self.basic_coverage_blocks[bcb].counter() {
+ if let Some(counter_kind) = &self.coverage_counters.bcb_counters[bcb] {
debug!(
"{}{:?} already has a counter: {}",
NESTED_INDENT.repeat(debug_indent_level),
bcb,
self.format_counter(counter_kind),
);
- return Ok(counter_kind.as_operand_id());
+ return Ok(counter_kind.as_operand());
}
// A BCB with only one incoming edge gets a simple `Counter` (via `make_counter()`).
@@ -324,7 +431,7 @@ impl<'a> BcbCounters<'a> {
// program results in a tight infinite loop, but it should still compile.
let one_path_to_target = self.bcb_has_one_path_to_target(bcb);
if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
- let counter_kind = self.coverage_counters.make_counter(|| Some(format!("{:?}", bcb)));
+ let counter_kind = self.coverage_counters.make_counter(|| Some(format!("{bcb:?}")));
if one_path_to_target {
debug!(
"{}{:?} gets a new counter: {}",
@@ -342,7 +449,7 @@ impl<'a> BcbCounters<'a> {
self.format_counter(&counter_kind),
);
}
- return self.basic_coverage_blocks[bcb].set_counter(counter_kind);
+ return self.coverage_counters.set_bcb_counter(bcb, counter_kind);
}
// A BCB with multiple incoming edges can compute its count by `Expression`, summing up the
@@ -358,7 +465,6 @@ impl<'a> BcbCounters<'a> {
let first_edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
predecessors.next().unwrap(),
bcb,
- collect_intermediate_expressions,
debug_indent_level + 1,
)?;
let mut some_sumup_edge_counter_operand = None;
@@ -366,7 +472,6 @@ impl<'a> BcbCounters<'a> {
let edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
predecessor,
bcb,
- collect_intermediate_expressions,
debug_indent_level + 1,
)?;
if let Some(sumup_edge_counter_operand) =
@@ -383,8 +488,8 @@ impl<'a> BcbCounters<'a> {
NESTED_INDENT.repeat(debug_indent_level),
self.format_counter(&intermediate_expression)
);
- let intermediate_expression_operand = intermediate_expression.as_operand_id();
- collect_intermediate_expressions.push(intermediate_expression);
+ let intermediate_expression_operand = intermediate_expression.as_operand();
+ self.coverage_counters.intermediate_expressions.push(intermediate_expression);
some_sumup_edge_counter_operand.replace(intermediate_expression_operand);
}
}
@@ -392,7 +497,7 @@ impl<'a> BcbCounters<'a> {
first_edge_counter_operand,
Op::Add,
some_sumup_edge_counter_operand.unwrap(),
- || Some(format!("{:?}", bcb)),
+ || Some(format!("{bcb:?}")),
);
debug!(
"{}{:?} gets a new counter (sum of predecessor counters): {}",
@@ -400,43 +505,34 @@ impl<'a> BcbCounters<'a> {
bcb,
self.format_counter(&counter_kind)
);
- self.basic_coverage_blocks[bcb].set_counter(counter_kind)
+ self.coverage_counters.set_bcb_counter(bcb, counter_kind)
}
fn get_or_make_edge_counter_operand(
&mut self,
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
- collect_intermediate_expressions: &mut Vec<CoverageKind>,
- ) -> Result<ExpressionOperandId, Error> {
- self.recursive_get_or_make_edge_counter_operand(
- from_bcb,
- to_bcb,
- collect_intermediate_expressions,
- 1,
- )
+ ) -> Result<Operand, Error> {
+ self.recursive_get_or_make_edge_counter_operand(from_bcb, to_bcb, 1)
}
fn recursive_get_or_make_edge_counter_operand(
&mut self,
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
- collect_intermediate_expressions: &mut Vec<CoverageKind>,
debug_indent_level: usize,
- ) -> Result<ExpressionOperandId, Error> {
+ ) -> Result<Operand, Error> {
// If the source BCB has only one successor (assumed to be the given target), an edge
// counter is unnecessary. Just get or make a counter for the source BCB.
let successors = self.bcb_successors(from_bcb).iter();
if successors.len() == 1 {
- return self.recursive_get_or_make_counter_operand(
- from_bcb,
- collect_intermediate_expressions,
- debug_indent_level + 1,
- );
+ return self.recursive_get_or_make_counter_operand(from_bcb, debug_indent_level + 1);
}
// If the edge already has a counter, return it.
- if let Some(counter_kind) = self.basic_coverage_blocks[to_bcb].edge_counter_from(from_bcb) {
+ if let Some(counter_kind) =
+ self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
+ {
debug!(
"{}Edge {:?}->{:?} already has a counter: {}",
NESTED_INDENT.repeat(debug_indent_level),
@@ -444,12 +540,12 @@ impl<'a> BcbCounters<'a> {
to_bcb,
self.format_counter(counter_kind)
);
- return Ok(counter_kind.as_operand_id());
+ return Ok(counter_kind.as_operand());
}
// Make a new counter to count this edge.
let counter_kind =
- self.coverage_counters.make_counter(|| Some(format!("{:?}->{:?}", from_bcb, to_bcb)));
+ self.coverage_counters.make_counter(|| Some(format!("{from_bcb:?}->{to_bcb:?}")));
debug!(
"{}Edge {:?}->{:?} gets a new counter: {}",
NESTED_INDENT.repeat(debug_indent_level),
@@ -457,7 +553,7 @@ impl<'a> BcbCounters<'a> {
to_bcb,
self.format_counter(&counter_kind)
);
- self.basic_coverage_blocks[to_bcb].set_edge_counter_from(from_bcb, counter_kind)
+ self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind)
}
/// Select a branch for the expression, either the recommended `reloop_branch`, or if none was
@@ -467,8 +563,7 @@ impl<'a> BcbCounters<'a> {
traversal: &TraverseCoverageGraphWithLoops,
branches: &[BcbBranch],
) -> BcbBranch {
- let branch_needs_a_counter =
- |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+ let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);
let some_reloop_branch = self.find_some_reloop_branch(traversal, &branches);
if let Some(reloop_branch_without_counter) =
@@ -481,10 +576,8 @@ impl<'a> BcbCounters<'a> {
);
reloop_branch_without_counter
} else {
- let &branch_without_counter = branches
- .iter()
- .find(|&&branch| branch.counter(&self.basic_coverage_blocks).is_none())
- .expect(
+ let &branch_without_counter =
+ branches.iter().find(|&branch| self.branch_has_no_counter(branch)).expect(
"needs_branch_counters was `true` so there should be at least one \
branch",
);
@@ -511,8 +604,7 @@ impl<'a> BcbCounters<'a> {
traversal: &TraverseCoverageGraphWithLoops,
branches: &[BcbBranch],
) -> Option<BcbBranch> {
- let branch_needs_a_counter =
- |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+ let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);
let mut some_reloop_branch: Option<BcbBranch> = None;
for context in traversal.context_stack.iter().rev() {
@@ -523,7 +615,7 @@ impl<'a> BcbCounters<'a> {
self.bcb_dominates(branch.target_bcb, backedge_from_bcb)
}) {
if let Some(reloop_branch) = some_reloop_branch {
- if reloop_branch.counter(&self.basic_coverage_blocks).is_none() {
+ if self.branch_has_no_counter(&reloop_branch) {
// we already found a candidate reloop_branch that still
// needs a counter
continue;
@@ -589,12 +681,24 @@ impl<'a> BcbCounters<'a> {
}
fn bcb_needs_branch_counters(&self, bcb: BasicCoverageBlock) -> bool {
- let branch_needs_a_counter =
- |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+ let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);
let branches = self.bcb_branches(bcb);
branches.len() > 1 && branches.iter().any(branch_needs_a_counter)
}
+ fn branch_has_no_counter(&self, branch: &BcbBranch) -> bool {
+ self.branch_counter(branch).is_none()
+ }
+
+ fn branch_counter(&self, branch: &BcbBranch) -> Option<&BcbCounter> {
+ let to_bcb = branch.target_bcb;
+ if let Some(from_bcb) = branch.edge_from_bcb {
+ self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
+ } else {
+ self.coverage_counters.bcb_counters[to_bcb].as_ref()
+ }
+ }
+
/// Returns true if the BasicCoverageBlock has zero or one incoming edge. (If zero, it should be
/// the entry point for the function.)
#[inline]
@@ -608,7 +712,7 @@ impl<'a> BcbCounters<'a> {
}
#[inline]
- fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+ fn format_counter(&self, counter_kind: &BcbCounter) -> String {
self.coverage_counters.debug_counters.format_counter(counter_kind)
}
}
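
The removed `next_expression` counted down from `u32::MAX` so counter and expression IDs could share one numeric operand space; the new code gives each kind its own ascending sequence and relies on the `Operand` enum to tell them apart. A small self-contained sketch of that allocator shape (types, start values, and method names here are simplified stand-ins, not the rustc coverage API):

// Illustrative stand-ins for CounterId / ExpressionId and the Operand enum.
#[derive(Clone, Copy, Debug, PartialEq)]
struct CounterId(u32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct ExpressionId(u32);

#[derive(Debug, PartialEq)]
enum Operand {
    Zero,
    Counter(CounterId),
    Expression(ExpressionId),
}

#[derive(Default)]
struct Counters {
    next_counter_id: u32,
    next_expression_id: u32,
}

impl Counters {
    // Each ID space grows independently; the `Operand` enum, not the numeric
    // value, says which kind of operand an ID refers to.
    fn next_counter(&mut self) -> CounterId {
        let id = CounterId(self.next_counter_id);
        self.next_counter_id += 1;
        id
    }

    fn next_expression(&mut self) -> ExpressionId {
        let id = ExpressionId(self.next_expression_id);
        self.next_expression_id += 1;
        id
    }
}

fn main() {
    let mut counters = Counters::default();
    let c = counters.next_counter();
    let e = counters.next_expression();
    assert_eq!((c.0, e.0), (0, 0)); // the two sequences advance independently
    assert_ne!(Operand::Counter(c), Operand::Zero);
    assert_ne!(Operand::Expression(e), Operand::Zero);
}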
diff --git a/compiler/rustc_mir_transform/src/coverage/debug.rs b/compiler/rustc_mir_transform/src/coverage/debug.rs
index 7ad981441..af616c498 100644
--- a/compiler/rustc_mir_transform/src/coverage/debug.rs
+++ b/compiler/rustc_mir_transform/src/coverage/debug.rs
@@ -108,6 +108,7 @@
//! recursively, generating labels with nested operations, enclosed in parentheses
//! (for example: `bcb2 + (bcb0 - bcb1)`).
+use super::counters::{BcbCounter, CoverageCounters};
use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
use super::spans::CoverageSpan;
@@ -198,9 +199,9 @@ impl DebugOptions {
fn bool_option_val(option: &str, some_strval: Option<&str>) -> bool {
if let Some(val) = some_strval {
- if vec!["yes", "y", "on", "true"].contains(&val) {
+ if ["yes", "y", "on", "true"].contains(&val) {
true
- } else if vec!["no", "n", "off", "false"].contains(&val) {
+ } else if ["no", "n", "off", "false"].contains(&val) {
false
} else {
bug!(
@@ -246,11 +247,11 @@ impl Default for ExpressionFormat {
}
}
-/// If enabled, this struct maintains a map from `CoverageKind` IDs (as `ExpressionOperandId`) to
-/// the `CoverageKind` data and optional label (normally, the counter's associated
+/// If enabled, this struct maintains a map from `BcbCounter` IDs (as `Operand`) to
+/// the `BcbCounter` data and optional label (normally, the counter's associated
/// `BasicCoverageBlock` format string, if any).
///
-/// Use `format_counter` to convert one of these `CoverageKind` counters to a debug output string,
+/// Use `format_counter` to convert one of these `BcbCounter` counters to a debug output string,
/// as directed by the `DebugOptions`. This allows the format of counter labels in logs and dump
/// files (including the `CoverageGraph` graphviz file) to be changed at runtime, via environment
/// variable.
@@ -258,7 +259,7 @@ impl Default for ExpressionFormat {
/// `DebugCounters` supports a recursive rendering of `Expression` counters, so they can be
/// presented as nested expressions such as `(bcb3 - (bcb0 + bcb1))`.
pub(super) struct DebugCounters {
- some_counters: Option<FxHashMap<ExpressionOperandId, DebugCounter>>,
+ some_counters: Option<FxHashMap<Operand, DebugCounter>>,
}
impl DebugCounters {
@@ -275,36 +276,35 @@ impl DebugCounters {
self.some_counters.is_some()
}
- pub fn add_counter(&mut self, counter_kind: &CoverageKind, some_block_label: Option<String>) {
+ pub fn add_counter(&mut self, counter_kind: &BcbCounter, some_block_label: Option<String>) {
if let Some(counters) = &mut self.some_counters {
- let id = counter_kind.as_operand_id();
+ let id = counter_kind.as_operand();
counters
.try_insert(id, DebugCounter::new(counter_kind.clone(), some_block_label))
.expect("attempt to add the same counter_kind to DebugCounters more than once");
}
}
- pub fn some_block_label(&self, operand: ExpressionOperandId) -> Option<&String> {
+ pub fn some_block_label(&self, operand: Operand) -> Option<&String> {
self.some_counters.as_ref().and_then(|counters| {
counters.get(&operand).and_then(|debug_counter| debug_counter.some_block_label.as_ref())
})
}
- pub fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+ pub fn format_counter(&self, counter_kind: &BcbCounter) -> String {
match *counter_kind {
- CoverageKind::Counter { .. } => {
+ BcbCounter::Counter { .. } => {
format!("Counter({})", self.format_counter_kind(counter_kind))
}
- CoverageKind::Expression { .. } => {
+ BcbCounter::Expression { .. } => {
format!("Expression({})", self.format_counter_kind(counter_kind))
}
- CoverageKind::Unreachable { .. } => "Unreachable".to_owned(),
}
}
- fn format_counter_kind(&self, counter_kind: &CoverageKind) -> String {
+ fn format_counter_kind(&self, counter_kind: &BcbCounter) -> String {
let counter_format = &debug_options().counter_format;
- if let CoverageKind::Expression { id, lhs, op, rhs } = *counter_kind {
+ if let BcbCounter::Expression { id, lhs, op, rhs } = *counter_kind {
if counter_format.operation {
return format!(
"{}{} {} {}",
@@ -323,29 +323,29 @@ impl DebugCounters {
}
}
- let id = counter_kind.as_operand_id();
+ let id = counter_kind.as_operand();
if self.some_counters.is_some() && (counter_format.block || !counter_format.id) {
let counters = self.some_counters.as_ref().unwrap();
if let Some(DebugCounter { some_block_label: Some(block_label), .. }) =
counters.get(&id)
{
return if counter_format.id {
- format!("{}#{}", block_label, id.index())
+ format!("{}#{:?}", block_label, id)
} else {
block_label.to_string()
};
}
}
- format!("#{}", id.index())
+ format!("#{:?}", id)
}
- fn format_operand(&self, operand: ExpressionOperandId) -> String {
- if operand.index() == 0 {
+ fn format_operand(&self, operand: Operand) -> String {
+ if matches!(operand, Operand::Zero) {
return String::from("0");
}
if let Some(counters) = &self.some_counters {
if let Some(DebugCounter { counter_kind, some_block_label }) = counters.get(&operand) {
- if let CoverageKind::Expression { .. } = counter_kind {
+ if let BcbCounter::Expression { .. } = counter_kind {
if let Some(label) = some_block_label && debug_options().counter_format.block {
return format!(
"{}:({})",
@@ -358,19 +358,19 @@ impl DebugCounters {
return self.format_counter_kind(counter_kind);
}
}
- format!("#{}", operand.index())
+ format!("#{:?}", operand)
}
}
/// A non-public support class to `DebugCounters`.
#[derive(Debug)]
struct DebugCounter {
- counter_kind: CoverageKind,
+ counter_kind: BcbCounter,
some_block_label: Option<String>,
}
impl DebugCounter {
- fn new(counter_kind: CoverageKind, some_block_label: Option<String>) -> Self {
+ fn new(counter_kind: BcbCounter, some_block_label: Option<String>) -> Self {
Self { counter_kind, some_block_label }
}
}
@@ -379,9 +379,9 @@ impl DebugCounter {
/// a Graphviz (.dot file) representation of the `CoverageGraph`, for debugging purposes.
pub(super) struct GraphvizData {
some_bcb_to_coverage_spans_with_counters:
- Option<FxHashMap<BasicCoverageBlock, Vec<(CoverageSpan, CoverageKind)>>>,
- some_bcb_to_dependency_counters: Option<FxHashMap<BasicCoverageBlock, Vec<CoverageKind>>>,
- some_edge_to_counter: Option<FxHashMap<(BasicCoverageBlock, BasicBlock), CoverageKind>>,
+ Option<FxHashMap<BasicCoverageBlock, Vec<(CoverageSpan, BcbCounter)>>>,
+ some_bcb_to_dependency_counters: Option<FxHashMap<BasicCoverageBlock, Vec<BcbCounter>>>,
+ some_edge_to_counter: Option<FxHashMap<(BasicCoverageBlock, BasicBlock), BcbCounter>>,
}
impl GraphvizData {
@@ -408,7 +408,7 @@ impl GraphvizData {
&mut self,
bcb: BasicCoverageBlock,
coverage_span: &CoverageSpan,
- counter_kind: &CoverageKind,
+ counter_kind: &BcbCounter,
) {
if let Some(bcb_to_coverage_spans_with_counters) =
self.some_bcb_to_coverage_spans_with_counters.as_mut()
@@ -423,7 +423,7 @@ impl GraphvizData {
pub fn get_bcb_coverage_spans_with_counters(
&self,
bcb: BasicCoverageBlock,
- ) -> Option<&[(CoverageSpan, CoverageKind)]> {
+ ) -> Option<&[(CoverageSpan, BcbCounter)]> {
if let Some(bcb_to_coverage_spans_with_counters) =
self.some_bcb_to_coverage_spans_with_counters.as_ref()
{
@@ -436,7 +436,7 @@ impl GraphvizData {
pub fn add_bcb_dependency_counter(
&mut self,
bcb: BasicCoverageBlock,
- counter_kind: &CoverageKind,
+ counter_kind: &BcbCounter,
) {
if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_mut() {
bcb_to_dependency_counters
@@ -446,7 +446,7 @@ impl GraphvizData {
}
}
- pub fn get_bcb_dependency_counters(&self, bcb: BasicCoverageBlock) -> Option<&[CoverageKind]> {
+ pub fn get_bcb_dependency_counters(&self, bcb: BasicCoverageBlock) -> Option<&[BcbCounter]> {
if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_ref() {
bcb_to_dependency_counters.get(&bcb).map(Deref::deref)
} else {
@@ -458,7 +458,7 @@ impl GraphvizData {
&mut self,
from_bcb: BasicCoverageBlock,
to_bb: BasicBlock,
- counter_kind: &CoverageKind,
+ counter_kind: &BcbCounter,
) {
if let Some(edge_to_counter) = self.some_edge_to_counter.as_mut() {
edge_to_counter
@@ -471,7 +471,7 @@ impl GraphvizData {
&self,
from_bcb: BasicCoverageBlock,
to_bb: BasicBlock,
- ) -> Option<&CoverageKind> {
+ ) -> Option<&BcbCounter> {
if let Some(edge_to_counter) = self.some_edge_to_counter.as_ref() {
edge_to_counter.get(&(from_bcb, to_bb))
} else {
@@ -485,10 +485,9 @@ impl GraphvizData {
/// _not_ used are retained in the `unused_expressions` Vec, to be included in debug output (logs
/// and/or a `CoverageGraph` graphviz output).
pub(super) struct UsedExpressions {
- some_used_expression_operands:
- Option<FxHashMap<ExpressionOperandId, Vec<InjectedExpressionId>>>,
+ some_used_expression_operands: Option<FxHashMap<Operand, Vec<ExpressionId>>>,
some_unused_expressions:
- Option<Vec<(CoverageKind, Option<BasicCoverageBlock>, BasicCoverageBlock)>>,
+ Option<Vec<(BcbCounter, Option<BasicCoverageBlock>, BasicCoverageBlock)>>,
}
impl UsedExpressions {
@@ -506,18 +505,18 @@ impl UsedExpressions {
self.some_used_expression_operands.is_some()
}
- pub fn add_expression_operands(&mut self, expression: &CoverageKind) {
+ pub fn add_expression_operands(&mut self, expression: &BcbCounter) {
if let Some(used_expression_operands) = self.some_used_expression_operands.as_mut() {
- if let CoverageKind::Expression { id, lhs, rhs, .. } = *expression {
+ if let BcbCounter::Expression { id, lhs, rhs, .. } = *expression {
used_expression_operands.entry(lhs).or_insert_with(Vec::new).push(id);
used_expression_operands.entry(rhs).or_insert_with(Vec::new).push(id);
}
}
}
- pub fn expression_is_used(&self, expression: &CoverageKind) -> bool {
+ pub fn expression_is_used(&self, expression: &BcbCounter) -> bool {
if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
- used_expression_operands.contains_key(&expression.as_operand_id())
+ used_expression_operands.contains_key(&expression.as_operand())
} else {
false
}
@@ -525,12 +524,12 @@ impl UsedExpressions {
pub fn add_unused_expression_if_not_found(
&mut self,
- expression: &CoverageKind,
+ expression: &BcbCounter,
edge_from_bcb: Option<BasicCoverageBlock>,
target_bcb: BasicCoverageBlock,
) {
if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
- if !used_expression_operands.contains_key(&expression.as_operand_id()) {
+ if !used_expression_operands.contains_key(&expression.as_operand()) {
self.some_unused_expressions.as_mut().unwrap().push((
expression.clone(),
edge_from_bcb,
@@ -540,11 +539,11 @@ impl UsedExpressions {
}
}
- /// Return the list of unused counters (if any) as a tuple with the counter (`CoverageKind`),
+ /// Return the list of unused counters (if any) as a tuple with the counter (`BcbCounter`),
/// optional `from_bcb` (if it was an edge counter), and `target_bcb`.
pub fn get_unused_expressions(
&self,
- ) -> Vec<(CoverageKind, Option<BasicCoverageBlock>, BasicCoverageBlock)> {
+ ) -> Vec<(BcbCounter, Option<BasicCoverageBlock>, BasicCoverageBlock)> {
if let Some(unused_expressions) = self.some_unused_expressions.as_ref() {
unused_expressions.clone()
} else {
@@ -560,7 +559,7 @@ impl UsedExpressions {
bcb_counters_without_direct_coverage_spans: &[(
Option<BasicCoverageBlock>,
BasicCoverageBlock,
- CoverageKind,
+ BcbCounter,
)],
) {
if self.is_enabled() {
@@ -630,7 +629,7 @@ pub(super) fn dump_coverage_spanview<'tcx>(
.expect("Unexpected error creating MIR spanview HTML file");
let crate_name = tcx.crate_name(def_id.krate);
let item_name = tcx.def_path(def_id).to_filename_friendly_no_crate();
- let title = format!("{}.{} - Coverage Spans", crate_name, item_name);
+ let title = format!("{crate_name}.{item_name} - Coverage Spans");
spanview::write_document(tcx, body_span, span_viewables, &title, &mut file)
.expect("Unexpected IO error dumping coverage spans as HTML");
}
@@ -660,18 +659,21 @@ pub(super) fn dump_coverage_graphviz<'tcx>(
mir_body: &mir::Body<'tcx>,
pass_name: &str,
basic_coverage_blocks: &CoverageGraph,
- debug_counters: &DebugCounters,
+ coverage_counters: &CoverageCounters,
graphviz_data: &GraphvizData,
- intermediate_expressions: &[CoverageKind],
+ intermediate_expressions: &[BcbCounter],
debug_used_expressions: &UsedExpressions,
) {
+ let debug_counters = &coverage_counters.debug_counters;
+
let mir_source = mir_body.source;
let def_id = mir_source.def_id();
let node_content = |bcb| {
bcb_to_string_sections(
tcx,
mir_body,
- debug_counters,
+ coverage_counters,
+ bcb,
&basic_coverage_blocks[bcb],
graphviz_data.get_bcb_coverage_spans_with_counters(bcb),
graphviz_data.get_bcb_dependency_counters(bcb),
@@ -737,12 +739,15 @@ pub(super) fn dump_coverage_graphviz<'tcx>(
fn bcb_to_string_sections<'tcx>(
tcx: TyCtxt<'tcx>,
mir_body: &mir::Body<'tcx>,
- debug_counters: &DebugCounters,
+ coverage_counters: &CoverageCounters,
+ bcb: BasicCoverageBlock,
bcb_data: &BasicCoverageBlockData,
- some_coverage_spans_with_counters: Option<&[(CoverageSpan, CoverageKind)]>,
- some_dependency_counters: Option<&[CoverageKind]>,
- some_intermediate_expressions: Option<&[CoverageKind]>,
+ some_coverage_spans_with_counters: Option<&[(CoverageSpan, BcbCounter)]>,
+ some_dependency_counters: Option<&[BcbCounter]>,
+ some_intermediate_expressions: Option<&[BcbCounter]>,
) -> Vec<String> {
+ let debug_counters = &coverage_counters.debug_counters;
+
let len = bcb_data.basic_blocks.len();
let mut sections = Vec::new();
if let Some(collect_intermediate_expressions) = some_intermediate_expressions {
@@ -778,8 +783,8 @@ fn bcb_to_string_sections<'tcx>(
.join(" \n"),
));
}
- if let Some(counter_kind) = &bcb_data.counter_kind {
- sections.push(format!("{:?}", counter_kind));
+ if let Some(counter_kind) = coverage_counters.bcb_counter(bcb) {
+ sections.push(format!("{counter_kind:?}"));
}
let non_term_blocks = bcb_data.basic_blocks[0..len - 1]
.iter()
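
For orientation, the debug.rs hunks above replace `CoverageKind`/`ExpressionOperandId` with `BcbCounter`/`Operand` throughout the debug helpers. The standalone sketch below approximates those two types as this patch uses them; the variants and the `as_operand` mapping are taken from the diff, but the concrete field types, derives, and the `Op` stand-in are assumptions, not the real rustc_middle definitions.

    // Simplified stand-ins for the coverage counter types referenced in the hunks above.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    struct CounterId(u32);
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    struct ExpressionId(u32);

    #[derive(Clone, Copy, Debug)]
    enum Op { Add, Subtract }

    // Operand of a coverage expression: nothing, a counter, or another expression.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    enum Operand {
        Zero,
        Counter(CounterId),
        Expression(ExpressionId),
    }

    // Counter attached to a basic coverage block (BCB) before MIR lowering.
    #[derive(Clone, Debug)]
    enum BcbCounter {
        Counter { id: CounterId },
        Expression { id: ExpressionId, lhs: Operand, op: Op, rhs: Operand },
    }

    impl BcbCounter {
        // Mirrors the `as_operand` calls in the diff: how a counter or expression
        // is referred to when it appears as an operand of another expression.
        fn as_operand(&self) -> Operand {
            match *self {
                BcbCounter::Counter { id } => Operand::Counter(id),
                BcbCounter::Expression { id, .. } => Operand::Expression(id),
            }
        }
    }

    fn main() {
        let c = BcbCounter::Counter { id: CounterId(0) };
        let e = BcbCounter::Expression {
            id: ExpressionId(0),
            lhs: c.as_operand(),
            op: Op::Add,
            rhs: Operand::Zero,
        };
        println!("{:?} -> {:?}", e, e.as_operand());
    }
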
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index d2a854b26..59b01ffec 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -1,12 +1,8 @@
-use super::Error;
-
use itertools::Itertools;
-use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::dominators::{self, Dominators};
use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
use rustc_index::bit_set::BitSet;
use rustc_index::{IndexSlice, IndexVec};
-use rustc_middle::mir::coverage::*;
use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind};
use std::cmp::Ordering;
@@ -15,10 +11,7 @@ use std::ops::{Index, IndexMut};
const ID_SEPARATOR: &str = ",";
/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
-/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
-/// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
-/// set of additional counters--if needed--to count incoming edges, if there are more than one.
-/// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
+/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s.
#[derive(Debug)]
pub(super) struct CoverageGraph {
bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
@@ -196,13 +189,6 @@ impl CoverageGraph {
}
#[inline(always)]
- pub fn iter_enumerated_mut(
- &mut self,
- ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
- self.bcbs.iter_enumerated_mut()
- }
-
- #[inline(always)]
pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
}
@@ -320,14 +306,12 @@ rustc_index::newtype_index! {
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
pub basic_blocks: Vec<BasicBlock>,
- pub counter_kind: Option<CoverageKind>,
- edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}
impl BasicCoverageBlockData {
pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
assert!(basic_blocks.len() > 0);
- Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
+ Self { basic_blocks }
}
#[inline(always)]
@@ -345,86 +329,6 @@ impl BasicCoverageBlockData {
&mir_body[self.last_bb()].terminator()
}
- pub fn set_counter(
- &mut self,
- counter_kind: CoverageKind,
- ) -> Result<ExpressionOperandId, Error> {
- debug_assert!(
- // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
- // have an expression (to be injected into an existing `BasicBlock` represented by this
- // `BasicCoverageBlock`).
- self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
- "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
- );
- let operand = counter_kind.as_operand_id();
- if let Some(replaced) = self.counter_kind.replace(counter_kind) {
- Error::from_string(format!(
- "attempt to set a BasicCoverageBlock coverage counter more than once; \
- {:?} already had counter {:?}",
- self, replaced,
- ))
- } else {
- Ok(operand)
- }
- }
-
- #[inline(always)]
- pub fn counter(&self) -> Option<&CoverageKind> {
- self.counter_kind.as_ref()
- }
-
- #[inline(always)]
- pub fn take_counter(&mut self) -> Option<CoverageKind> {
- self.counter_kind.take()
- }
-
- pub fn set_edge_counter_from(
- &mut self,
- from_bcb: BasicCoverageBlock,
- counter_kind: CoverageKind,
- ) -> Result<ExpressionOperandId, Error> {
- if level_enabled!(tracing::Level::DEBUG) {
- // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
- // have an expression (to be injected into an existing `BasicBlock` represented by this
- // `BasicCoverageBlock`).
- if self.counter_kind.as_ref().is_some_and(|c| !c.is_expression()) {
- return Error::from_string(format!(
- "attempt to add an incoming edge counter from {:?} when the target BCB already \
- has a `Counter`",
- from_bcb
- ));
- }
- }
- let operand = counter_kind.as_operand_id();
- if let Some(replaced) =
- self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind)
- {
- Error::from_string(format!(
- "attempt to set an edge counter more than once; from_bcb: \
- {:?} already had counter {:?}",
- from_bcb, replaced,
- ))
- } else {
- Ok(operand)
- }
- }
-
- #[inline]
- pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> {
- if let Some(edge_from_bcbs) = &self.edge_from_bcbs {
- edge_from_bcbs.get(&from_bcb)
- } else {
- None
- }
- }
-
- #[inline]
- pub fn take_edge_counters(
- &mut self,
- ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
- self.edge_from_bcbs.take().map(|m| m.into_iter())
- }
-
pub fn id(&self) -> String {
format!("@{}", self.basic_blocks.iter().map(|bb| bb.index().to_string()).join(ID_SEPARATOR))
}
@@ -454,17 +358,6 @@ impl BcbBranch {
Self { edge_from_bcb, target_bcb: to_bcb }
}
- pub fn counter<'a>(
- &self,
- basic_coverage_blocks: &'a CoverageGraph,
- ) -> Option<&'a CoverageKind> {
- if let Some(from_bcb) = self.edge_from_bcb {
- basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb)
- } else {
- basic_coverage_blocks[self.target_bcb].counter()
- }
- }
-
pub fn is_only_path_to_target(&self) -> bool {
self.edge_from_bcb.is_none()
}
@@ -612,7 +505,7 @@ impl TraverseCoverageGraphWithLoops {
the {}",
successor_to_add,
if let Some(loop_header) = some_loop_header {
- format!("worklist for the loop headed by {:?}", loop_header)
+ format!("worklist for the loop headed by {loop_header:?}")
} else {
String::from("non-loop worklist")
},
@@ -623,7 +516,7 @@ impl TraverseCoverageGraphWithLoops {
"{:?} successor is non-branching. Defer it to the end of the {}",
successor_to_add,
if let Some(loop_header) = some_loop_header {
- format!("worklist for the loop headed by {:?}", loop_header)
+ format!("worklist for the loop headed by {loop_header:?}")
} else {
String::from("non-loop worklist")
},
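
The graph.rs hunk above strips all counter storage (`counter_kind`, `edge_from_bcbs`, and their accessors) out of `BasicCoverageBlockData`. Below is a hypothetical sketch of where that state lives after the change, shaped to match the `bcb_counter`, `take_bcb_counter`, `drain_bcb_counters`, and `drain_bcb_edge_counters` calls that appear in the mod.rs hunks below; the map-based storage and the stub types are assumptions rather than the actual `CoverageCounters` implementation.

    use std::collections::HashMap;

    type BasicCoverageBlock = usize; // stand-in for the real newtype index
    #[derive(Clone, Debug)]
    struct BcbCounter; // stand-in for the real counter/expression enum

    // Central table owning per-BCB and per-edge counters, instead of storing
    // them on each BasicCoverageBlockData.
    #[derive(Default)]
    struct CoverageCounters {
        bcb_counters: HashMap<BasicCoverageBlock, BcbCounter>,
        bcb_edge_counters: HashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>,
    }

    impl CoverageCounters {
        fn bcb_counter(&self, bcb: BasicCoverageBlock) -> Option<&BcbCounter> {
            self.bcb_counters.get(&bcb)
        }
        fn take_bcb_counter(&mut self, bcb: BasicCoverageBlock) -> Option<BcbCounter> {
            self.bcb_counters.remove(&bcb)
        }
        fn drain_bcb_counters(
            &mut self,
        ) -> impl Iterator<Item = (BasicCoverageBlock, BcbCounter)> + '_ {
            self.bcb_counters.drain()
        }
        fn drain_bcb_edge_counters(
            &mut self,
        ) -> impl Iterator<Item = ((BasicCoverageBlock, BasicCoverageBlock), BcbCounter)> + '_ {
            self.bcb_edge_counters.drain()
        }
    }

    fn main() {
        let mut counters = CoverageCounters::default();
        counters.bcb_counters.insert(1, BcbCounter);
        counters.bcb_edge_counters.insert((0, 1), BcbCounter);
        assert!(counters.bcb_counter(1).is_some());
        let taken = counters.take_bcb_counter(1);
        assert!(taken.is_some() && counters.bcb_counter(1).is_none());
        // Remaining counters (here only the edge counter) are drained at the end,
        // as inject_indirect_counters does in the mod.rs hunks below.
        assert_eq!(counters.drain_bcb_edge_counters().count(), 1);
        assert_eq!(counters.drain_bcb_counters().count(), 0);
    }
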
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 076e714d7..8c9eae508 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -8,9 +8,9 @@ mod spans;
#[cfg(test)]
mod tests;
-use counters::CoverageCounters;
-use graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
-use spans::{CoverageSpan, CoverageSpans};
+use self::counters::{BcbCounter, CoverageCounters};
+use self::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
+use self::spans::{CoverageSpan, CoverageSpans};
use crate::MirPass;
@@ -106,6 +106,7 @@ struct Instrumentor<'a, 'tcx> {
source_file: Lrc<SourceFile>,
fn_sig_span: Span,
body_span: Span,
+ function_source_hash: u64,
basic_coverage_blocks: CoverageGraph,
coverage_counters: CoverageCounters,
}
@@ -137,6 +138,8 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
let function_source_hash = hash_mir_source(tcx, hir_body);
let basic_coverage_blocks = CoverageGraph::from_mir(mir_body);
+ let coverage_counters = CoverageCounters::new(&basic_coverage_blocks);
+
Self {
pass_name,
tcx,
@@ -144,8 +147,9 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
source_file,
fn_sig_span,
body_span,
+ function_source_hash,
basic_coverage_blocks,
- coverage_counters: CoverageCounters::new(function_source_hash),
+ coverage_counters,
}
}
@@ -199,52 +203,47 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
// `BasicCoverageBlock`s not already associated with a `CoverageSpan`.
//
// Intermediate expressions (used to compute other `Expression` values), which have no
- // direct associate to any `BasicCoverageBlock`, are returned in the method `Result`.
- let intermediate_expressions_or_error = self
+ // direct association with any `BasicCoverageBlock`, are accumulated inside `coverage_counters`.
+ let result = self
.coverage_counters
.make_bcb_counters(&mut self.basic_coverage_blocks, &coverage_spans);
- let (result, intermediate_expressions) = match intermediate_expressions_or_error {
- Ok(intermediate_expressions) => {
- // If debugging, add any intermediate expressions (which are not associated with any
- // BCB) to the `debug_used_expressions` map.
- if debug_used_expressions.is_enabled() {
- for intermediate_expression in &intermediate_expressions {
- debug_used_expressions.add_expression_operands(intermediate_expression);
- }
+ if let Ok(()) = result {
+ // If debugging, add any intermediate expressions (which are not associated with any
+ // BCB) to the `debug_used_expressions` map.
+ if debug_used_expressions.is_enabled() {
+ for intermediate_expression in &self.coverage_counters.intermediate_expressions {
+ debug_used_expressions.add_expression_operands(intermediate_expression);
}
-
- ////////////////////////////////////////////////////
- // Remove the counter or edge counter from of each `CoverageSpan`s associated
- // `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR.
- //
- // `Coverage` statements injected from `CoverageSpan`s will include the code regions
- // (source code start and end positions) to be counted by the associated counter.
- //
- // These `CoverageSpan`-associated counters are removed from their associated
- // `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph`
- // are indirect counters (to be injected next, without associated code regions).
- self.inject_coverage_span_counters(
- coverage_spans,
- &mut graphviz_data,
- &mut debug_used_expressions,
- );
-
- ////////////////////////////////////////////////////
- // For any remaining `BasicCoverageBlock` counters (that were not associated with
- // any `CoverageSpan`), inject `Coverage` statements (_without_ code region `Span`s)
- // to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on
- // are in fact counted, even though they don't directly contribute to counting
- // their own independent code region's coverage.
- self.inject_indirect_counters(&mut graphviz_data, &mut debug_used_expressions);
-
- // Intermediate expressions will be injected as the final step, after generating
- // debug output, if any.
- ////////////////////////////////////////////////////
-
- (Ok(()), intermediate_expressions)
}
- Err(e) => (Err(e), Vec::new()),
+
+ ////////////////////////////////////////////////////
+ // Remove the counter or edge counter from each `CoverageSpan`'s associated
+ // `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR.
+ //
+ // `Coverage` statements injected from `CoverageSpan`s will include the code regions
+ // (source code start and end positions) to be counted by the associated counter.
+ //
+ // These `CoverageSpan`-associated counters are removed from their associated
+ // `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph`
+ // are indirect counters (to be injected next, without associated code regions).
+ self.inject_coverage_span_counters(
+ coverage_spans,
+ &mut graphviz_data,
+ &mut debug_used_expressions,
+ );
+
+ ////////////////////////////////////////////////////
+ // For any remaining `BasicCoverageBlock` counters (that were not associated with
+ // any `CoverageSpan`), inject `Coverage` statements (_without_ code region `Span`s)
+ // to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on
+ // are in fact counted, even though they don't directly contribute to counting
+ // their own independent code region's coverage.
+ self.inject_indirect_counters(&mut graphviz_data, &mut debug_used_expressions);
+
+ // Intermediate expressions will be injected as the final step, after generating
+ // debug output, if any.
+ ////////////////////////////////////////////////////
};
if graphviz_data.is_enabled() {
@@ -255,9 +254,9 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
self.mir_body,
self.pass_name,
&self.basic_coverage_blocks,
- &self.coverage_counters.debug_counters,
+ &self.coverage_counters,
&graphviz_data,
- &intermediate_expressions,
+ &self.coverage_counters.intermediate_expressions,
&debug_used_expressions,
);
}
@@ -273,8 +272,11 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
////////////////////////////////////////////////////
// Finally, inject the intermediate expressions collected along the way.
- for intermediate_expression in intermediate_expressions {
- inject_intermediate_expression(self.mir_body, intermediate_expression);
+ for intermediate_expression in &self.coverage_counters.intermediate_expressions {
+ inject_intermediate_expression(
+ self.mir_body,
+ self.make_mir_coverage_kind(intermediate_expression),
+ );
}
}
@@ -303,8 +305,8 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
let span = covspan.span;
let counter_kind = if let Some(&counter_operand) = bcb_counters[bcb].as_ref() {
self.coverage_counters.make_identity_counter(counter_operand)
- } else if let Some(counter_kind) = self.bcb_data_mut(bcb).take_counter() {
- bcb_counters[bcb] = Some(counter_kind.as_operand_id());
+ } else if let Some(counter_kind) = self.coverage_counters.take_bcb_counter(bcb) {
+ bcb_counters[bcb] = Some(counter_kind.as_operand());
debug_used_expressions.add_expression_operands(&counter_kind);
counter_kind
} else {
@@ -312,19 +314,14 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
};
graphviz_data.add_bcb_coverage_span_with_counter(bcb, &covspan, &counter_kind);
- debug!(
- "Calling make_code_region(file_name={}, source_file={:?}, span={}, body_span={})",
- file_name,
- self.source_file,
- source_map.span_to_diagnostic_string(span),
- source_map.span_to_diagnostic_string(body_span)
- );
+ let code_region =
+ make_code_region(source_map, file_name, &self.source_file, span, body_span);
inject_statement(
self.mir_body,
- counter_kind,
+ self.make_mir_coverage_kind(&counter_kind),
self.bcb_leader_bb(bcb),
- Some(make_code_region(source_map, file_name, &self.source_file, span, body_span)),
+ Some(code_region),
);
}
}
@@ -343,19 +340,17 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
debug_used_expressions: &mut debug::UsedExpressions,
) {
let mut bcb_counters_without_direct_coverage_spans = Vec::new();
- for (target_bcb, target_bcb_data) in self.basic_coverage_blocks.iter_enumerated_mut() {
- if let Some(counter_kind) = target_bcb_data.take_counter() {
- bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind));
- }
- if let Some(edge_counters) = target_bcb_data.take_edge_counters() {
- for (from_bcb, counter_kind) in edge_counters {
- bcb_counters_without_direct_coverage_spans.push((
- Some(from_bcb),
- target_bcb,
- counter_kind,
- ));
- }
- }
+ for (target_bcb, counter_kind) in self.coverage_counters.drain_bcb_counters() {
+ bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind));
+ }
+ for ((from_bcb, target_bcb), counter_kind) in
+ self.coverage_counters.drain_bcb_edge_counters()
+ {
+ bcb_counters_without_direct_coverage_spans.push((
+ Some(from_bcb),
+ target_bcb,
+ counter_kind,
+ ));
}
// If debug is enabled, validate that every BCB or edge counter not directly associated
@@ -372,7 +367,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
);
match counter_kind {
- CoverageKind::Counter { .. } => {
+ BcbCounter::Counter { .. } => {
let inject_to_bb = if let Some(from_bcb) = edge_from_bcb {
// The MIR edge starts `from_bb` (the outgoing / last BasicBlock in
// `from_bcb`) and ends at `to_bb` (the incoming / first BasicBlock in the
@@ -405,12 +400,17 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
target_bb
};
- inject_statement(self.mir_body, counter_kind, inject_to_bb, None);
+ inject_statement(
+ self.mir_body,
+ self.make_mir_coverage_kind(&counter_kind),
+ inject_to_bb,
+ None,
+ );
}
- CoverageKind::Expression { .. } => {
- inject_intermediate_expression(self.mir_body, counter_kind)
- }
- _ => bug!("CoverageKind should be a counter"),
+ BcbCounter::Expression { .. } => inject_intermediate_expression(
+ self.mir_body,
+ self.make_mir_coverage_kind(&counter_kind),
+ ),
}
}
}
@@ -431,13 +431,19 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
}
#[inline]
- fn bcb_data_mut(&mut self, bcb: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
- &mut self.basic_coverage_blocks[bcb]
+ fn format_counter(&self, counter_kind: &BcbCounter) -> String {
+ self.coverage_counters.debug_counters.format_counter(counter_kind)
}
- #[inline]
- fn format_counter(&self, counter_kind: &CoverageKind) -> String {
- self.coverage_counters.debug_counters.format_counter(counter_kind)
+ fn make_mir_coverage_kind(&self, counter_kind: &BcbCounter) -> CoverageKind {
+ match *counter_kind {
+ BcbCounter::Counter { id } => {
+ CoverageKind::Counter { function_source_hash: self.function_source_hash, id }
+ }
+ BcbCounter::Expression { id, lhs, op, rhs } => {
+ CoverageKind::Expression { id, lhs, op, rhs }
+ }
+ }
}
}
@@ -508,6 +514,14 @@ fn make_code_region(
span: Span,
body_span: Span,
) -> CodeRegion {
+ debug!(
+ "Called make_code_region(file_name={}, source_file={:?}, span={}, body_span={})",
+ file_name,
+ source_file,
+ source_map.span_to_diagnostic_string(span),
+ source_map.span_to_diagnostic_string(body_span)
+ );
+
let (start_line, mut start_col) = source_file.lookup_file_pos(span.lo());
let (end_line, end_col) = if span.hi() == span.lo() {
let (end_line, mut end_col) = (start_line, start_col);
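
One design point of the mod.rs changes above: `BcbCounter` values created during counter assignment no longer carry a `function_source_hash`. The hash is computed once, stored on the `Instrumentor`, and attached only when a counter is lowered to a MIR `CoverageKind` via `make_mir_coverage_kind`. A minimal sketch of that split follows, with stand-in types; only the `Counter` case is shown and the field shapes are simplified.

    // BCB-level counters are hash-free; the hash is added at MIR-lowering time.
    #[derive(Clone, Copy, Debug)]
    enum BcbCounter {
        Counter { id: u32 },
    }

    #[derive(Debug)]
    enum CoverageKind {
        Counter { function_source_hash: u64, id: u32 },
    }

    struct Instrumentor {
        function_source_hash: u64,
    }

    impl Instrumentor {
        fn make_mir_coverage_kind(&self, counter: &BcbCounter) -> CoverageKind {
            match *counter {
                BcbCounter::Counter { id } => {
                    CoverageKind::Counter { function_source_hash: self.function_source_hash, id }
                }
            }
        }
    }

    fn main() {
        let instrumentor = Instrumentor { function_source_hash: 0xDEAD_BEEF };
        let kind = instrumentor.make_mir_coverage_kind(&BcbCounter::Counter { id: 0 });
        println!("{kind:?}");
    }
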
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 74b4b4a07..aa205655f 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -43,43 +43,25 @@ struct CoverageVisitor {
}
impl CoverageVisitor {
- /// Updates `num_counters` to the maximum encountered zero-based counter_id plus 1. Note the
- /// final computed number of counters should be the number of all `CoverageKind::Counter`
- /// statements in the MIR *plus one* for the implicit `ZERO` counter.
+ /// Updates `num_counters` to the maximum encountered counter ID plus 1.
#[inline(always)]
- fn update_num_counters(&mut self, counter_id: u32) {
+ fn update_num_counters(&mut self, counter_id: CounterId) {
+ let counter_id = counter_id.as_u32();
self.info.num_counters = std::cmp::max(self.info.num_counters, counter_id + 1);
}
- /// Computes an expression index for each expression ID, and updates `num_expressions` to the
- /// maximum encountered index plus 1.
+ /// Updates `num_expressions` to the maximum encountered expression ID plus 1.
#[inline(always)]
- fn update_num_expressions(&mut self, expression_id: u32) {
- let expression_index = u32::MAX - expression_id;
- self.info.num_expressions = std::cmp::max(self.info.num_expressions, expression_index + 1);
+ fn update_num_expressions(&mut self, expression_id: ExpressionId) {
+ let expression_id = expression_id.as_u32();
+ self.info.num_expressions = std::cmp::max(self.info.num_expressions, expression_id + 1);
}
- fn update_from_expression_operand(&mut self, operand_id: u32) {
- if operand_id >= self.info.num_counters {
- let operand_as_expression_index = u32::MAX - operand_id;
- if operand_as_expression_index >= self.info.num_expressions {
- // The operand ID is outside the known range of counter IDs and also outside the
- // known range of expression IDs. In either case, the result of a missing operand
- // (if and when used in an expression) will be zero, so from a computation
- // perspective, it doesn't matter whether it is interpreted as a counter or an
- // expression.
- //
- // However, the `num_counters` and `num_expressions` query results are used to
- // allocate arrays when generating the coverage map (during codegen), so choose
- // the type that grows either `num_counters` or `num_expressions` the least.
- if operand_id - self.info.num_counters
- < operand_as_expression_index - self.info.num_expressions
- {
- self.update_num_counters(operand_id)
- } else {
- self.update_num_expressions(operand_id)
- }
- }
+ fn update_from_expression_operand(&mut self, operand: Operand) {
+ match operand {
+ Operand::Counter(id) => self.update_num_counters(id),
+ Operand::Expression(id) => self.update_num_expressions(id),
+ Operand::Zero => {}
}
}
@@ -100,19 +82,15 @@ impl CoverageVisitor {
if self.add_missing_operands {
match coverage.kind {
CoverageKind::Expression { lhs, rhs, .. } => {
- self.update_from_expression_operand(u32::from(lhs));
- self.update_from_expression_operand(u32::from(rhs));
+ self.update_from_expression_operand(lhs);
+ self.update_from_expression_operand(rhs);
}
_ => {}
}
} else {
match coverage.kind {
- CoverageKind::Counter { id, .. } => {
- self.update_num_counters(u32::from(id));
- }
- CoverageKind::Expression { id, .. } => {
- self.update_num_expressions(u32::from(id));
- }
+ CoverageKind::Counter { id, .. } => self.update_num_counters(id),
+ CoverageKind::Expression { id, .. } => self.update_num_expressions(id),
_ => {}
}
}
@@ -123,8 +101,7 @@ fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) ->
let mir_body = tcx.instance_mir(instance_def);
let mut coverage_visitor = CoverageVisitor {
- // num_counters always has at least the `ZERO` counter.
- info: CoverageInfo { num_counters: 1, num_expressions: 0 },
+ info: CoverageInfo { num_counters: 0, num_expressions: 0 },
add_missing_operands: false,
};
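
The query.rs hunk above drops the old scheme in which expression IDs were folded into the top of the `u32` range (and the implicit `ZERO` counter forced `num_counters` to start at 1). Here is a toy sketch of the simplified bookkeeping, where counters and expressions have separate zero-based ID spaces and each total is just the maximum encountered ID plus 1; the plain `u32` IDs stand in for the real `CounterId`/`ExpressionId` newtypes.

    #[derive(Clone, Copy)]
    enum Operand {
        Zero,
        Counter(u32),
        Expression(u32),
    }

    #[derive(Default)]
    struct CoverageInfo {
        num_counters: u32,
        num_expressions: u32,
    }

    impl CoverageInfo {
        // Each ID space grows independently to "max encountered ID + 1".
        fn update_from_operand(&mut self, operand: Operand) {
            match operand {
                Operand::Counter(id) => {
                    self.num_counters = self.num_counters.max(id + 1);
                }
                Operand::Expression(id) => {
                    self.num_expressions = self.num_expressions.max(id + 1);
                }
                Operand::Zero => {}
            }
        }
    }

    fn main() {
        let mut info = CoverageInfo::default();
        for op in [Operand::Counter(0), Operand::Counter(3), Operand::Expression(1), Operand::Zero] {
            info.update_from_operand(op);
        }
        // Counters 0..=3 and expressions 0..=1 were seen.
        assert_eq!((info.num_counters, info.num_expressions), (4, 2));
    }
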
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index 35cf9ea5f..deebf5345 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -11,7 +11,7 @@ use rustc_middle::ty::TyCtxt;
use rustc_span::source_map::original_sp;
use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol};
-use std::cell::RefCell;
+use std::cell::OnceCell;
use std::cmp::Ordering;
#[derive(Debug, Copy, Clone)]
@@ -67,7 +67,7 @@ impl CoverageStatement {
pub(super) struct CoverageSpan {
pub span: Span,
pub expn_span: Span,
- pub current_macro_or_none: RefCell<Option<Option<Symbol>>>,
+ pub current_macro_or_none: OnceCell<Option<Symbol>>,
pub bcb: BasicCoverageBlock,
pub coverage_statements: Vec<CoverageStatement>,
pub is_closure: bool,
@@ -175,8 +175,7 @@ impl CoverageSpan {
/// If the span is part of a macro, returns the macro name symbol.
pub fn current_macro(&self) -> Option<Symbol> {
self.current_macro_or_none
- .borrow_mut()
- .get_or_insert_with(|| {
+ .get_or_init(|| {
if let ExpnKind::Macro(MacroKind::Bang, current_macro) =
self.expn_span.ctxt().outer_expn_data().kind
{
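
The spans.rs hunk above swaps the memoization cell in `CoverageSpan::current_macro` from `RefCell<Option<Option<Symbol>>>` plus `get_or_insert_with` to `OnceCell<Option<Symbol>>` plus `get_or_init`, which states the compute-once intent directly and avoids the runtime borrow. A small self-contained illustration of the pattern follows; the `compute_macro` closure stands in for the real expansion-data lookup.

    use std::cell::OnceCell;

    struct CoverageSpan {
        current_macro_or_none: OnceCell<Option<String>>,
    }

    impl CoverageSpan {
        fn current_macro(&self, compute_macro: impl FnOnce() -> Option<String>) -> Option<String> {
            // The first call runs the closure; later calls return the cached value.
            self.current_macro_or_none.get_or_init(compute_macro).clone()
        }
    }

    fn main() {
        let span = CoverageSpan { current_macro_or_none: OnceCell::new() };
        assert_eq!(span.current_macro(|| Some("vec".to_string())), Some("vec".to_string()));
        // The second closure is never run; the cached result is reused.
        assert_eq!(span.current_macro(|| None), Some("vec".to_string()));
    }
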
diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs b/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
index 3d6095d27..f41adf667 100644
--- a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
@@ -2,5 +2,5 @@ use proc_macro::TokenStream;
#[proc_macro]
pub fn let_bcb(item: TokenStream) -> TokenStream {
- format!("let bcb{} = graph::BasicCoverageBlock::from_usize({});", item, item).parse().unwrap()
+ format!("let bcb{item} = graph::BasicCoverageBlock::from_usize({item});").parse().unwrap()
}
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index 25891d3ca..4a066ed3a 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -34,7 +34,6 @@ use itertools::Itertools;
use rustc_data_structures::graph::WithNumNodes;
use rustc_data_structures::graph::WithSuccessors;
use rustc_index::{Idx, IndexVec};
-use rustc_middle::mir::coverage::CoverageKind;
use rustc_middle::mir::*;
use rustc_middle::ty;
use rustc_span::{self, BytePos, Pos, Span, DUMMY_SP};
@@ -675,17 +674,17 @@ fn test_make_bcb_counters() {
));
}
}
- let mut coverage_counters = counters::CoverageCounters::new(0);
- let intermediate_expressions = coverage_counters
+ let mut coverage_counters = counters::CoverageCounters::new(&basic_coverage_blocks);
+ coverage_counters
.make_bcb_counters(&mut basic_coverage_blocks, &coverage_spans)
.expect("should be Ok");
- assert_eq!(intermediate_expressions.len(), 0);
+ assert_eq!(coverage_counters.intermediate_expressions.len(), 0);
let_bcb!(1);
assert_eq!(
- 1, // coincidentally, bcb1 has a `Counter` with id = 1
- match basic_coverage_blocks[bcb1].counter().expect("should have a counter") {
- CoverageKind::Counter { id, .. } => id,
+ 0, // bcb1 has a `Counter` with id = 0
+ match coverage_counters.bcb_counter(bcb1).expect("should have a counter") {
+ counters::BcbCounter::Counter { id, .. } => id,
_ => panic!("expected a Counter"),
}
.as_u32()
@@ -693,9 +692,9 @@ fn test_make_bcb_counters() {
let_bcb!(2);
assert_eq!(
- 2, // coincidentally, bcb2 has a `Counter` with id = 2
- match basic_coverage_blocks[bcb2].counter().expect("should have a counter") {
- CoverageKind::Counter { id, .. } => id,
+ 1, // bcb2 has a `Counter` with id = 1
+ match coverage_counters.bcb_counter(bcb2).expect("should have a counter") {
+ counters::BcbCounter::Counter { id, .. } => id,
_ => panic!("expected a Counter"),
}
.as_u32()
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 78fb19635..8f4dc9f69 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -13,9 +13,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_mir_dataflow::value_analysis::{
Map, State, TrackElem, ValueAnalysis, ValueAnalysisWrapper, ValueOrPlace,
};
-use rustc_mir_dataflow::{
- lattice::FlatSet, Analysis, Results, ResultsVisitor, SwitchIntEdgeEffects,
-};
+use rustc_mir_dataflow::{lattice::FlatSet, Analysis, Results, ResultsVisitor};
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, FieldIdx, VariantIdx};
@@ -249,49 +247,27 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
.unwrap_or(FlatSet::Top)
}
- fn handle_switch_int(
+ fn handle_switch_int<'mir>(
&self,
- discr: &Operand<'tcx>,
- apply_edge_effects: &mut impl SwitchIntEdgeEffects<State<Self::Value>>,
- ) {
- // FIXME: The dataflow framework only provides the state if we call `apply()`, which makes
- // this more inefficient than it has to be.
- let mut discr_value = None;
- let mut handled = false;
- apply_edge_effects.apply(|state, target| {
- let discr_value = match discr_value {
- Some(value) => value,
- None => {
- let value = match self.handle_operand(discr, state) {
- ValueOrPlace::Value(value) => value,
- ValueOrPlace::Place(place) => state.get_idx(place, self.map()),
- };
- let result = match value {
- FlatSet::Top => FlatSet::Top,
- FlatSet::Elem(ScalarTy(scalar, _)) => {
- let int = scalar.assert_int();
- FlatSet::Elem(int.assert_bits(int.size()))
- }
- FlatSet::Bottom => FlatSet::Bottom,
- };
- discr_value = Some(result);
- result
- }
- };
-
- let FlatSet::Elem(choice) = discr_value else {
- // Do nothing if we don't know which branch will be taken.
- return
- };
-
- if target.value.map(|n| n == choice).unwrap_or(!handled) {
- // Branch is taken. Has no effect on state.
- handled = true;
- } else {
- // Branch is not taken.
- state.mark_unreachable();
+ discr: &'mir Operand<'tcx>,
+ targets: &'mir SwitchTargets,
+ state: &mut State<Self::Value>,
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ let value = match self.handle_operand(discr, state) {
+ ValueOrPlace::Value(value) => value,
+ ValueOrPlace::Place(place) => state.get_idx(place, self.map()),
+ };
+ match value {
+ // We are branching on uninitialized data, this is UB, treat it as unreachable.
+ // This allows the set of visited edges to grow monotonically with the lattice.
+ FlatSet::Bottom => TerminatorEdges::None,
+ FlatSet::Elem(ScalarTy(scalar, _)) => {
+ let int = scalar.assert_int();
+ let choice = int.assert_bits(int.size());
+ TerminatorEdges::Single(targets.target_for_value(choice))
}
- })
+ FlatSet::Top => TerminatorEdges::SwitchInt { discr, targets },
+ }
}
}
@@ -532,7 +508,7 @@ impl<'tcx, 'map, 'a> Visitor<'tcx> for OperandCollector<'tcx, 'map, 'a> {
struct DummyMachine;
-impl<'mir, 'tcx> rustc_const_eval::interpret::Machine<'mir, 'tcx> for DummyMachine {
+impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for DummyMachine {
rustc_const_eval::interpret::compile_time_machine!(<'mir, 'tcx>);
type MemoryKind = !;
const PANIC_ON_ALLOC_FAIL: bool = true;
@@ -557,7 +533,7 @@ impl<'mir, 'tcx> rustc_const_eval::interpret::Machine<'mir, 'tcx> for DummyMachi
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
_abi: rustc_target::spec::abi::Abi,
- _args: &[rustc_const_eval::interpret::OpTy<'tcx, Self::Provenance>],
+ _args: &[rustc_const_eval::interpret::FnArg<'tcx, Self::Provenance>],
_destination: &rustc_const_eval::interpret::PlaceTy<'tcx, Self::Provenance>,
_target: Option<BasicBlock>,
_unwind: UnwindAction,
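
In the dataflow_const_prop.rs hunk above, `handle_switch_int` no longer pushes per-edge effects through a `SwitchIntEdgeEffects` callback; it inspects the discriminant's lattice value and reports which successor edges remain live as a `TerminatorEdges` value. Below is a simplified stand-alone sketch of that decision; `FlatSet` and `TerminatorEdges` are reduced stand-ins for the rustc_mir_dataflow types of the same names (the real `SwitchInt` variant also carries the discriminant and targets).

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum FlatSet<T> {
        Bottom,   // no value reaches this point (e.g. branching on uninit data)
        Elem(T),  // exactly one known value
        Top,      // unknown value
    }

    #[derive(Debug, PartialEq)]
    enum TerminatorEdges {
        None,          // no successor is reachable
        Single(usize), // only the edge for the known discriminant value
        SwitchInt,     // all switch targets remain reachable
    }

    fn switch_edges(
        discr: FlatSet<u128>,
        target_for_value: impl Fn(u128) -> usize,
    ) -> TerminatorEdges {
        match discr {
            FlatSet::Bottom => TerminatorEdges::None,
            FlatSet::Elem(value) => TerminatorEdges::Single(target_for_value(value)),
            FlatSet::Top => TerminatorEdges::SwitchInt,
        }
    }

    fn main() {
        // Pretend targets: value 0 -> block 10, anything else -> block 99.
        let target_for_value = |v| if v == 0 { 10 } else { 99 };
        assert_eq!(switch_edges(FlatSet::Elem(0), target_for_value), TerminatorEdges::Single(10));
        assert_eq!(switch_edges(FlatSet::Top, target_for_value), TerminatorEdges::SwitchInt);
        assert_eq!(switch_edges(FlatSet::Bottom, target_for_value), TerminatorEdges::None);
    }
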
diff --git a/compiler/rustc_mir_transform/src/dead_store_elimination.rs b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
index 7bc5183a0..3f988930b 100644
--- a/compiler/rustc_mir_transform/src/dead_store_elimination.rs
+++ b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
@@ -13,9 +13,12 @@
//!
use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
-use rustc_mir_dataflow::impls::{borrowed_locals, MaybeTransitiveLiveLocals};
+use rustc_mir_dataflow::impls::{
+ borrowed_locals, LivenessTransferFunction, MaybeTransitiveLiveLocals,
+};
use rustc_mir_dataflow::Analysis;
/// Performs the optimization on the body
@@ -28,8 +31,33 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
.iterate_to_fixpoint()
.into_results_cursor(body);
+ // For blocks with a call terminator, if an argument copy can be turned into a move,
+ // record it as (block, argument index).
+ let mut call_operands_to_move = Vec::new();
let mut patch = Vec::new();
+
for (bb, bb_data) in traversal::preorder(body) {
+ if let TerminatorKind::Call { ref args, .. } = bb_data.terminator().kind {
+ let loc = Location { block: bb, statement_index: bb_data.statements.len() };
+
+ // Position ourselves between the evaluation of `args` and the write to `destination`.
+ live.seek_to_block_end(bb);
+ let mut state = live.get().clone();
+
+ for (index, arg) in args.iter().enumerate().rev() {
+ if let Operand::Copy(place) = *arg
+ && !place.is_indirect()
+ && !borrowed.contains(place.local)
+ && !state.contains(place.local)
+ {
+ call_operands_to_move.push((bb, index));
+ }
+
+ // Account that `arg` is read from, so we don't promote another argument to a move.
+ LivenessTransferFunction(&mut state).visit_operand(arg, loc);
+ }
+ }
+
for (statement_index, statement) in bb_data.statements.iter().enumerate().rev() {
let loc = Location { block: bb, statement_index };
if let StatementKind::Assign(assign) = &statement.kind {
@@ -64,7 +92,7 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
}
}
- if patch.is_empty() {
+ if patch.is_empty() && call_operands_to_move.is_empty() {
return;
}
@@ -72,6 +100,14 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
for Location { block, statement_index } in patch {
bbs[block].statements[statement_index].make_nop();
}
+ for (block, argument_index) in call_operands_to_move {
+ let TerminatorKind::Call { ref mut args, .. } = bbs[block].terminator_mut().kind else {
+ bug!()
+ };
+ let arg = &mut args[argument_index];
+ let Operand::Copy(place) = *arg else { bug!() };
+ *arg = Operand::Move(place);
+ }
crate::simplify::simplify_locals(body, tcx)
}
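
The dead_store_elimination.rs hunk above adds a second output to the pass: call arguments passed as `Operand::Copy` are rewritten to `Operand::Move` when the copied local is not borrowed and not live after the call, with arguments scanned in reverse so that an earlier use of the same local is not also promoted. A toy model of that rule follows, using plain `HashSet`s in place of the borrow and liveness analyses the real pass consults.

    use std::collections::HashSet;

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Operand {
        Copy(u32),
        Move(u32),
    }

    fn promote_copies_to_moves(
        args: &mut [Operand],
        borrowed: &HashSet<u32>,
        live_after_call: &HashSet<u32>,
    ) {
        // Walk arguments in reverse, tracking reads so that the same local passed
        // twice is only promoted once (mirroring the transfer-function update).
        let mut seen_reads: HashSet<u32> = live_after_call.clone();
        for arg in args.iter_mut().rev() {
            if let Operand::Copy(local) = *arg {
                if !borrowed.contains(&local) && !seen_reads.contains(&local) {
                    *arg = Operand::Move(local);
                }
                seen_reads.insert(local);
            }
        }
    }

    fn main() {
        let borrowed = HashSet::from([2]);
        let live_after_call = HashSet::from([3]);
        // Local 1 is passed twice: only the later use may become a move.
        let mut args = [Operand::Copy(1), Operand::Copy(1), Operand::Copy(2), Operand::Copy(3)];
        promote_copies_to_moves(&mut args, &borrowed, &live_after_call);
        assert_eq!(
            args,
            [Operand::Copy(1), Operand::Move(1), Operand::Copy(2), Operand::Copy(3)]
        );
    }
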
diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
index e782c0373..79645310a 100644
--- a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
+++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
@@ -166,7 +166,7 @@ pub fn deduced_param_attrs<'tcx>(
// Codegen won't use this information for anything if all the function parameters are passed
// directly. Detect that and bail, for compilation speed.
- let fn_ty = tcx.type_of(def_id).subst_identity();
+ let fn_ty = tcx.type_of(def_id).instantiate_identity();
if matches!(fn_ty.kind(), ty::FnDef(..)) {
if fn_ty
.fn_sig(tcx)
diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs
index a31551cf6..b73b72c31 100644
--- a/compiler/rustc_mir_transform/src/dest_prop.rs
+++ b/compiler/rustc_mir_transform/src/dest_prop.rs
@@ -218,9 +218,9 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation {
if merged_locals.contains(*src) {
continue;
}
- let Some(dest) =
- candidates.iter().find(|dest| !merged_locals.contains(**dest)) else {
- continue;
+ let Some(dest) = candidates.iter().find(|dest| !merged_locals.contains(**dest))
+ else {
+ continue;
};
if !tcx.consider_optimizing(|| {
format!("{} round {}", tcx.def_path_str(def_id), round_count)
@@ -601,9 +601,7 @@ impl WriteInfo {
rhs: &Operand<'tcx>,
body: &Body<'tcx>,
) {
- let Some(rhs) = rhs.place() else {
- return
- };
+ let Some(rhs) = rhs.place() else { return };
if let Some(pair) = places_to_candidate_pair(lhs, rhs, body) {
self.skip_pair = Some(pair);
}
diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
index 8a7b027dd..319fb4eaf 100644
--- a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
+++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
@@ -107,9 +107,7 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
for i in 0..body.basic_blocks.len() {
let bbs = &*body.basic_blocks;
let parent = BasicBlock::from_usize(i);
- let Some(opt_data) = evaluate_candidate(tcx, body, parent) else {
- continue
- };
+ let Some(opt_data) = evaluate_candidate(tcx, body, parent) else { continue };
if !tcx.consider_optimizing(|| format!("EarlyOtherwiseBranch {:?}", &opt_data)) {
break;
@@ -119,10 +117,9 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
should_cleanup = true;
- let TerminatorKind::SwitchInt {
- discr: parent_op,
- targets: parent_targets
- } = &bbs[parent].terminator().kind else {
+ let TerminatorKind::SwitchInt { discr: parent_op, targets: parent_targets } =
+ &bbs[parent].terminator().kind
+ else {
unreachable!()
};
// Always correct since we can only switch on `Copy` types
@@ -168,7 +165,8 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
);
let eq_new_targets = parent_targets.iter().map(|(value, child)| {
- let TerminatorKind::SwitchInt{ targets, .. } = &bbs[child].terminator().kind else {
+ let TerminatorKind::SwitchInt { targets, .. } = &bbs[child].terminator().kind
+ else {
unreachable!()
};
(value, targets.target_for_value(value))
@@ -311,11 +309,9 @@ fn evaluate_candidate<'tcx>(
parent: BasicBlock,
) -> Option<OptimizationData<'tcx>> {
let bbs = &body.basic_blocks;
- let TerminatorKind::SwitchInt {
- targets,
- discr: parent_discr,
- } = &bbs[parent].terminator().kind else {
- return None
+ let TerminatorKind::SwitchInt { targets, discr: parent_discr } = &bbs[parent].terminator().kind
+ else {
+ return None;
};
let parent_ty = parent_discr.ty(body.local_decls(), tcx);
let parent_dest = {
@@ -332,18 +328,16 @@ fn evaluate_candidate<'tcx>(
};
let (_, child) = targets.iter().next()?;
let child_terminator = &bbs[child].terminator();
- let TerminatorKind::SwitchInt {
- targets: child_targets,
- discr: child_discr,
- } = &child_terminator.kind else {
- return None
+ let TerminatorKind::SwitchInt { targets: child_targets, discr: child_discr } =
+ &child_terminator.kind
+ else {
+ return None;
};
let child_ty = child_discr.ty(body.local_decls(), tcx);
if child_ty != parent_ty {
return None;
}
- let Some(StatementKind::Assign(boxed))
- = &bbs[child].statements.first().map(|x| &x.kind) else {
+ let Some(StatementKind::Assign(boxed)) = &bbs[child].statements.first().map(|x| &x.kind) else {
return None;
};
let (_, Rvalue::Discriminant(child_place)) = &**boxed else {
@@ -383,12 +377,8 @@ fn verify_candidate_branch<'tcx>(
return false;
}
// ...assign the discriminant of `place` in that statement
- let StatementKind::Assign(boxed) = &branch.statements[0].kind else {
- return false
- };
- let (discr_place, Rvalue::Discriminant(from_place)) = &**boxed else {
- return false
- };
+ let StatementKind::Assign(boxed) = &branch.statements[0].kind else { return false };
+ let (discr_place, Rvalue::Discriminant(from_place)) = &**boxed else { return false };
if *from_place != place {
return false;
}
@@ -397,8 +387,9 @@ fn verify_candidate_branch<'tcx>(
return false;
}
// ...terminate on a `SwitchInt` that invalidates that local
- let TerminatorKind::SwitchInt{ discr: switch_op, targets, .. } = &branch.terminator().kind else {
- return false
+ let TerminatorKind::SwitchInt { discr: switch_op, targets, .. } = &branch.terminator().kind
+ else {
+ return false;
};
if *switch_op != Operand::Move(*discr_place) {
return false;
diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
index cc0d7d51b..e51f771e0 100644
--- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
@@ -18,9 +18,9 @@ pub fn build_ptr_tys<'tcx>(
unique_did: DefId,
nonnull_did: DefId,
) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) {
- let substs = tcx.mk_substs(&[pointee.into()]);
- let unique_ty = tcx.type_of(unique_did).subst(tcx, substs);
- let nonnull_ty = tcx.type_of(nonnull_did).subst(tcx, substs);
+ let args = tcx.mk_args(&[pointee.into()]);
+ let unique_ty = tcx.type_of(unique_did).instantiate(tcx, args);
+ let nonnull_ty = tcx.type_of(nonnull_did).instantiate(tcx, args);
let ptr_ty = Ty::new_imm_ptr(tcx, pointee);
(unique_ty, nonnull_ty, ptr_ty)
@@ -95,7 +95,8 @@ impl<'tcx> MirPass<'tcx> for ElaborateBoxDerefs {
let unique_did =
tcx.adt_def(def_id).non_enum_variant().fields[FieldIdx::from_u32(0)].did;
- let Some(nonnull_def) = tcx.type_of(unique_did).subst_identity().ty_adt_def() else {
+ let Some(nonnull_def) = tcx.type_of(unique_did).instantiate_identity().ty_adt_def()
+ else {
span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique")
};
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index d5664e2b4..b6b1ae6d3 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -48,6 +48,7 @@ use std::fmt;
pub struct ElaborateDrops;
impl<'tcx> MirPass<'tcx> for ElaborateDrops {
+ #[instrument(level = "trace", skip(self, tcx, body))]
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
debug!("elaborate_drops({:?} @ {:?})", body.source, body.span);
@@ -65,23 +66,23 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops {
};
let elaborate_patch = {
let env = MoveDataParamEnv { move_data, param_env };
- remove_dead_unwinds(tcx, body, &env);
- let inits = MaybeInitializedPlaces::new(tcx, body, &env)
+ let mut inits = MaybeInitializedPlaces::new(tcx, body, &env)
+ .skipping_unreachable_unwind()
.into_engine(tcx, body)
.pass_name("elaborate_drops")
.iterate_to_fixpoint()
.into_results_cursor(body);
+ let dead_unwinds = compute_dead_unwinds(&body, &mut inits);
let uninits = MaybeUninitializedPlaces::new(tcx, body, &env)
.mark_inactive_variants_as_uninit()
+ .skipping_unreachable_unwind(dead_unwinds)
.into_engine(tcx, body)
.pass_name("elaborate_drops")
.iterate_to_fixpoint()
.into_results_cursor(body);
- let reachable = traversal::reachable_as_bitset(body);
-
let drop_flags = IndexVec::from_elem(None, &env.move_data.move_paths);
ElaborateDropsCtxt {
tcx,
@@ -90,7 +91,6 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops {
init_data: InitializationData { inits, uninits },
drop_flags,
patch: MirPatch::new(body),
- reachable,
}
.elaborate()
};
@@ -99,65 +99,30 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops {
}
}
-/// Removes unwind edges which are known to be unreachable, because they are in `drop` terminators
+/// Records unwind edges which are known to be unreachable, because they are in `drop` terminators
/// that can't drop anything.
-fn remove_dead_unwinds<'tcx>(
- tcx: TyCtxt<'tcx>,
- body: &mut Body<'tcx>,
- env: &MoveDataParamEnv<'tcx>,
-) {
- debug!("remove_dead_unwinds({:?})", body.span);
+#[instrument(level = "trace", skip(body, flow_inits), ret)]
+fn compute_dead_unwinds<'mir, 'tcx>(
+ body: &'mir Body<'tcx>,
+ flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
+) -> BitSet<BasicBlock> {
// We only need to do this pass once, because unwind edges can only
// reach cleanup blocks, which can't have unwind edges themselves.
- let mut dead_unwinds = Vec::new();
- let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env)
- .into_engine(tcx, body)
- .pass_name("remove_dead_unwinds")
- .iterate_to_fixpoint()
- .into_results_cursor(body);
+ let mut dead_unwinds = BitSet::new_empty(body.basic_blocks.len());
for (bb, bb_data) in body.basic_blocks.iter_enumerated() {
- let place = match bb_data.terminator().kind {
- TerminatorKind::Drop { place, unwind: UnwindAction::Cleanup(_), .. } => place,
- _ => continue,
- };
-
- debug!("remove_dead_unwinds @ {:?}: {:?}", bb, bb_data);
-
- let LookupResult::Exact(path) = env.move_data.rev_lookup.find(place.as_ref()) else {
- debug!("remove_dead_unwinds: has parent; skipping");
+ let TerminatorKind::Drop { place, unwind: UnwindAction::Cleanup(_), .. } =
+ bb_data.terminator().kind
+ else {
continue;
};
flow_inits.seek_before_primary_effect(body.terminator_loc(bb));
- debug!(
- "remove_dead_unwinds @ {:?}: path({:?})={:?}; init_data={:?}",
- bb,
- place,
- path,
- flow_inits.get()
- );
-
- let mut maybe_live = false;
- on_all_drop_children_bits(tcx, body, &env, path, |child| {
- maybe_live |= flow_inits.contains(child);
- });
-
- debug!("remove_dead_unwinds @ {:?}: maybe_live={}", bb, maybe_live);
- if !maybe_live {
- dead_unwinds.push(bb);
+ if flow_inits.analysis().is_unwind_dead(place, flow_inits.get()) {
+ dead_unwinds.insert(bb);
}
}
- if dead_unwinds.is_empty() {
- return;
- }
-
- let basic_blocks = body.basic_blocks.as_mut();
- for &bb in dead_unwinds.iter() {
- if let Some(unwind) = basic_blocks[bb].terminator_mut().unwind_mut() {
- *unwind = UnwindAction::Unreachable;
- }
- }
+ dead_unwinds
}
struct InitializationData<'mir, 'tcx> {
@@ -290,7 +255,6 @@ struct ElaborateDropsCtxt<'a, 'tcx> {
init_data: InitializationData<'a, 'tcx>,
drop_flags: IndexVec<MovePathIndex, Option<Local>>,
patch: MirPatch<'tcx>,
- reachable: BitSet<BasicBlock>,
}
impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
@@ -330,9 +294,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
fn collect_drop_flags(&mut self) {
for (bb, data) in self.body.basic_blocks.iter_enumerated() {
- if !self.reachable.contains(bb) {
- continue;
- }
let terminator = data.terminator();
let place = match terminator.kind {
TerminatorKind::Drop { ref place, .. } => place,
@@ -358,8 +319,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
self.tcx.sess.delay_span_bug(
terminator.source_info.span,
format!(
- "drop of untracked, uninitialized value {:?}, place {:?} ({:?})",
- bb, place, path
+ "drop of untracked, uninitialized value {bb:?}, place {place:?} ({path:?})"
),
);
}
@@ -385,9 +345,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
fn elaborate_drops(&mut self) {
for (bb, data) in self.body.basic_blocks.iter_enumerated() {
- if !self.reachable.contains(bb) {
- continue;
- }
let loc = Location { block: bb, statement_index: data.statements.len() };
let terminator = data.terminator();
@@ -424,7 +381,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
if !replace {
self.tcx.sess.delay_span_bug(
terminator.source_info.span,
- format!("drop of untracked value {:?}", bb),
+ format!("drop of untracked value {bb:?}"),
);
}
// A drop and replace behind a pointer/array/whatever.
@@ -466,9 +423,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
fn drop_flags_for_fn_rets(&mut self) {
for (bb, data) in self.body.basic_blocks.iter_enumerated() {
- if !self.reachable.contains(bb) {
- continue;
- }
if let TerminatorKind::Call {
destination,
target: Some(tgt),
@@ -507,9 +461,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
// clobbered before they are read.
for (bb, data) in self.body.basic_blocks.iter_enumerated() {
- if !self.reachable.contains(bb) {
- continue;
- }
debug!("drop_flags_for_locs({:?})", data);
for i in 0..(data.statements.len() + 1) {
debug!("drop_flag_for_locs: stmt {}", i);
diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
index 58cc161dd..d20286084 100644
--- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
+++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
@@ -30,6 +30,8 @@ fn abi_can_unwind(abi: Abi) -> bool {
| EfiApi
| AvrInterrupt
| AvrNonBlockingInterrupt
+ | RiscvInterruptM
+ | RiscvInterruptS
| CCmseNonSecureCall
| Wasm
| RustIntrinsic
diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs
index 0b41e57be..a42eacbf2 100644
--- a/compiler/rustc_mir_transform/src/function_item_references.rs
+++ b/compiler/rustc_mir_transform/src/function_item_references.rs
@@ -2,7 +2,7 @@ use itertools::Itertools;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
-use rustc_middle::ty::{self, EarlyBinder, SubstsRef, Ty, TyCtxt};
+use rustc_middle::ty::{self, EarlyBinder, GenericArgsRef, Ty, TyCtxt};
use rustc_session::lint::builtin::FUNCTION_ITEM_REFERENCES;
use rustc_span::{symbol::sym, Span};
use rustc_target::spec::abi::Abi;
@@ -40,20 +40,19 @@ impl<'tcx> Visitor<'tcx> for FunctionItemRefChecker<'_, 'tcx> {
{
let source_info = *self.body.source_info(location);
let func_ty = func.ty(self.body, self.tcx);
- if let ty::FnDef(def_id, substs_ref) = *func_ty.kind() {
+ if let ty::FnDef(def_id, args_ref) = *func_ty.kind() {
// Handle calls to `transmute`
if self.tcx.is_diagnostic_item(sym::transmute, def_id) {
let arg_ty = args[0].ty(self.body, self.tcx);
for inner_ty in arg_ty.walk().filter_map(|arg| arg.as_type()) {
- if let Some((fn_id, fn_substs)) =
- FunctionItemRefChecker::is_fn_ref(inner_ty)
+ if let Some((fn_id, fn_args)) = FunctionItemRefChecker::is_fn_ref(inner_ty)
{
let span = self.nth_arg_span(&args, 0);
- self.emit_lint(fn_id, fn_substs, source_info, span);
+ self.emit_lint(fn_id, fn_args, source_info, span);
}
}
} else {
- self.check_bound_args(def_id, substs_ref, &args, source_info);
+ self.check_bound_args(def_id, args_ref, &args, source_info);
}
}
}
@@ -63,11 +62,11 @@ impl<'tcx> Visitor<'tcx> for FunctionItemRefChecker<'_, 'tcx> {
impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
/// Emits a lint for function reference arguments bound by `fmt::Pointer` in calls to the
- /// function defined by `def_id` with the substitutions `substs_ref`.
+ /// function defined by `def_id` with the substitutions `args_ref`.
fn check_bound_args(
&self,
def_id: DefId,
- substs_ref: SubstsRef<'tcx>,
+ args_ref: GenericArgsRef<'tcx>,
args: &[Operand<'tcx>],
source_info: SourceInfo,
) {
@@ -76,15 +75,17 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
for bound in bounds {
if let Some(bound_ty) = self.is_pointer_trait(bound) {
// Get the argument types as they appear in the function signature.
- let arg_defs = self.tcx.fn_sig(def_id).subst_identity().skip_binder().inputs();
+ let arg_defs =
+ self.tcx.fn_sig(def_id).instantiate_identity().skip_binder().inputs();
for (arg_num, arg_def) in arg_defs.iter().enumerate() {
// For all types reachable from the argument type in the fn sig
for inner_ty in arg_def.walk().filter_map(|arg| arg.as_type()) {
// If the inner type matches the type bound by `Pointer`
if inner_ty == bound_ty {
// Do a substitution using the parameters from the callsite
- let subst_ty = EarlyBinder::bind(inner_ty).subst(self.tcx, substs_ref);
- if let Some((fn_id, fn_substs)) =
+ let subst_ty =
+ EarlyBinder::bind(inner_ty).instantiate(self.tcx, args_ref);
+ if let Some((fn_id, fn_args)) =
FunctionItemRefChecker::is_fn_ref(subst_ty)
{
let mut span = self.nth_arg_span(args, arg_num);
@@ -94,7 +95,7 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
let callsite_ctxt = span.source_callsite().ctxt();
span = span.with_ctxt(callsite_ctxt);
}
- self.emit_lint(fn_id, fn_substs, source_info, span);
+ self.emit_lint(fn_id, fn_args, source_info, span);
}
}
}
@@ -115,8 +116,8 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
}
/// If a type is a reference or raw pointer to the anonymous type of a function definition,
- /// returns that function's `DefId` and `SubstsRef`.
- fn is_fn_ref(ty: Ty<'tcx>) -> Option<(DefId, SubstsRef<'tcx>)> {
+ /// returns that function's `DefId` and `GenericArgsRef`.
+ fn is_fn_ref(ty: Ty<'tcx>) -> Option<(DefId, GenericArgsRef<'tcx>)> {
let referent_ty = match ty.kind() {
ty::Ref(_, referent_ty, _) => Some(referent_ty),
ty::RawPtr(ty_and_mut) => Some(&ty_and_mut.ty),
@@ -124,8 +125,8 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
};
referent_ty
.map(|ref_ty| {
- if let ty::FnDef(def_id, substs_ref) = *ref_ty.kind() {
- Some((def_id, substs_ref))
+ if let ty::FnDef(def_id, args_ref) = *ref_ty.kind() {
+ Some((def_id, args_ref))
} else {
None
}
@@ -145,7 +146,7 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
fn emit_lint(
&self,
fn_id: DefId,
- fn_substs: SubstsRef<'tcx>,
+ fn_args: GenericArgsRef<'tcx>,
source_info: SourceInfo,
span: Span,
) {
@@ -155,7 +156,7 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
.assert_crate_local()
.lint_root;
// FIXME: use existing printing routines to print the function signature
- let fn_sig = self.tcx.fn_sig(fn_id).subst(self.tcx, fn_substs);
+ let fn_sig = self.tcx.fn_sig(fn_id).instantiate(self.tcx, fn_args);
let unsafety = fn_sig.unsafety().prefix_str();
let abi = match fn_sig.abi() {
Abi::Rust => String::from(""),
@@ -167,15 +168,15 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
}
};
let ident = self.tcx.item_name(fn_id).to_ident_string();
- let ty_params = fn_substs.types().map(|ty| format!("{}", ty));
- let const_params = fn_substs.consts().map(|c| format!("{}", c));
+ let ty_params = fn_args.types().map(|ty| format!("{ty}"));
+ let const_params = fn_args.consts().map(|c| format!("{c}"));
let params = ty_params.chain(const_params).join(", ");
let num_args = fn_sig.inputs().map_bound(|inputs| inputs.len()).skip_binder();
let variadic = if fn_sig.c_variadic() { ", ..." } else { "" };
let ret = if fn_sig.output().skip_binder().is_unit() { "" } else { " -> _" };
let sugg = format!(
"{} as {}{}fn({}{}){}",
- if params.is_empty() { ident.clone() } else { format!("{}::<{}>", ident, params) },
+ if params.is_empty() { ident.clone() } else { format!("{ident}::<{params}>") },
unsafety,
abi,
vec!["_"; num_args].join(", "),
diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/generator.rs
index 264bc61f1..ff4822f33 100644
--- a/compiler/rustc_mir_transform/src/generator.rs
+++ b/compiler/rustc_mir_transform/src/generator.rs
@@ -50,8 +50,10 @@
//! For generators with state 1 (returned) and state 2 (poisoned) it does nothing.
//! Otherwise it drops all the values in scope at the last suspension point.
+use crate::abort_unwinding_calls;
use crate::deref_separator::deref_finder;
use crate::errors;
+use crate::pass_manager as pm;
use crate::simplify;
use crate::MirPass;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
@@ -64,8 +66,9 @@ use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::dump_mir;
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::*;
+use rustc_middle::ty::InstanceDef;
use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
-use rustc_middle::ty::{GeneratorSubsts, SubstsRef};
+use rustc_middle::ty::{GeneratorArgs, GenericArgsRef};
use rustc_mir_dataflow::impls::{
MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive,
};
@@ -194,11 +197,11 @@ fn replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtx
const SELF_ARG: Local = Local::from_u32(1);
/// Generator has not been resumed yet.
-const UNRESUMED: usize = GeneratorSubsts::UNRESUMED;
+const UNRESUMED: usize = GeneratorArgs::UNRESUMED;
/// Generator has returned / is completed.
-const RETURNED: usize = GeneratorSubsts::RETURNED;
+const RETURNED: usize = GeneratorArgs::RETURNED;
/// Generator has panicked and is poisoned.
-const POISONED: usize = GeneratorSubsts::POISONED;
+const POISONED: usize = GeneratorArgs::POISONED;
/// Number of variants to reserve in generator state. Corresponds to
/// `UNRESUMED` (beginning of a generator) and `RETURNED`/`POISONED`
@@ -223,7 +226,7 @@ struct TransformVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
is_async_kind: bool,
state_adt_ref: AdtDef<'tcx>,
- state_substs: SubstsRef<'tcx>,
+ state_args: GenericArgsRef<'tcx>,
// The type of the discriminant in the generator struct
discr_ty: Ty<'tcx>,
@@ -265,7 +268,7 @@ impl<'tcx> TransformVisitor<'tcx> {
(false, true) => 1, // Poll::Pending
});
- let kind = AggregateKind::Adt(self.state_adt_ref.did(), idx, self.state_substs, None, None);
+ let kind = AggregateKind::Adt(self.state_adt_ref.did(), idx, self.state_args, None, None);
// `Poll::Pending`
if self.is_async_kind && idx == VariantIdx::new(1) {
@@ -431,8 +434,8 @@ fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body
let pin_did = tcx.require_lang_item(LangItem::Pin, Some(body.span));
let pin_adt_ref = tcx.adt_def(pin_did);
- let substs = tcx.mk_substs(&[ref_gen_ty.into()]);
- let pin_ref_gen_ty = Ty::new_adt(tcx, pin_adt_ref, substs);
+ let args = tcx.mk_args(&[ref_gen_ty.into()]);
+ let pin_ref_gen_ty = Ty::new_adt(tcx, pin_adt_ref, args);
// Replace the by ref generator argument
body.local_decls.raw[1].ty = pin_ref_gen_ty;
@@ -856,7 +859,7 @@ fn sanitize_witness<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
witness: Ty<'tcx>,
- upvars: Vec<Ty<'tcx>>,
+ upvars: &'tcx ty::List<Ty<'tcx>>,
layout: &GeneratorLayout<'tcx>,
) {
let did = body.source.def_id();
@@ -1147,7 +1150,25 @@ fn create_generator_drop_shim<'tcx>(
// unrelated code from the resume part of the function
simplify::remove_dead_blocks(tcx, &mut body);
+ // Update the body's def to become the drop glue.
+ // This needs to be updated before the AbortUnwindingCalls pass.
+ let gen_instance = body.source.instance;
+ let drop_in_place = tcx.require_lang_item(LangItem::DropInPlace, None);
+ let drop_instance = InstanceDef::DropGlue(drop_in_place, Some(gen_ty));
+ body.source.instance = drop_instance;
+
+ pm::run_passes_no_validate(
+ tcx,
+ &mut body,
+ &[&abort_unwinding_calls::AbortUnwindingCalls],
+ None,
+ );
+
+ // Temporarily change the MirSource to the generator's instance so that dump_mir produces a
+ // more sensible filename.
+ body.source.instance = gen_instance;
dump_mir(tcx, false, "generator_drop", &0, &body, |_, _| Ok(()));
+ body.source.instance = drop_instance;
body
}
@@ -1317,6 +1338,8 @@ fn create_generator_resume_function<'tcx>(
// unrelated code from the drop part of the function
simplify::remove_dead_blocks(tcx, body);
+ pm::run_passes_no_validate(tcx, body, &[&abort_unwinding_calls::AbortUnwindingCalls], None);
+
dump_mir(tcx, false, "generator_resume", &0, body, |_, _| Ok(()));
}
@@ -1431,7 +1454,7 @@ pub(crate) fn mir_generator_witnesses<'tcx>(
// The first argument is the generator type passed by value
let gen_ty = body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty;
- // Get the interior types and substs which typeck computed
+ // Get the interior types and args which typeck computed
let movable = match *gen_ty.kind() {
ty::Generator(_, _, movability) => movability == hir::Movability::Movable,
ty::Error(_) => return None,
@@ -1465,38 +1488,38 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
// The first argument is the generator type passed by value
let gen_ty = body.local_decls.raw[1].ty;
- // Get the discriminant type and substs which typeck computed
+ // Get the discriminant type and args which typeck computed
let (discr_ty, upvars, interior, movable) = match *gen_ty.kind() {
- ty::Generator(_, substs, movability) => {
- let substs = substs.as_generator();
+ ty::Generator(_, args, movability) => {
+ let args = args.as_generator();
(
- substs.discr_ty(tcx),
- substs.upvar_tys().collect::<Vec<_>>(),
- substs.witness(),
+ args.discr_ty(tcx),
+ args.upvar_tys(),
+ args.witness(),
movability == hir::Movability::Movable,
)
}
_ => {
- tcx.sess.delay_span_bug(body.span, format!("unexpected generator type {}", gen_ty));
+ tcx.sess.delay_span_bug(body.span, format!("unexpected generator type {gen_ty}"));
return;
}
};
let is_async_kind = matches!(body.generator_kind(), Some(GeneratorKind::Async(_)));
- let (state_adt_ref, state_substs) = if is_async_kind {
+ let (state_adt_ref, state_args) = if is_async_kind {
// Compute Poll<return_ty>
let poll_did = tcx.require_lang_item(LangItem::Poll, None);
let poll_adt_ref = tcx.adt_def(poll_did);
- let poll_substs = tcx.mk_substs(&[body.return_ty().into()]);
- (poll_adt_ref, poll_substs)
+ let poll_args = tcx.mk_args(&[body.return_ty().into()]);
+ (poll_adt_ref, poll_args)
} else {
// Compute GeneratorState<yield_ty, return_ty>
let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
let state_adt_ref = tcx.adt_def(state_did);
- let state_substs = tcx.mk_substs(&[yield_ty.into(), body.return_ty().into()]);
- (state_adt_ref, state_substs)
+ let state_args = tcx.mk_args(&[yield_ty.into(), body.return_ty().into()]);
+ (state_adt_ref, state_args)
};
- let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_substs);
+ let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_args);
// We rename RETURN_PLACE which has type mir.return_ty to new_ret_local
// RETURN_PLACE then is a fresh unused local with type ret_ty.
@@ -1570,7 +1593,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
tcx,
is_async_kind,
state_adt_ref,
- state_substs,
+ state_args,
remap,
storage_liveness,
always_live_locals,
@@ -1763,7 +1786,9 @@ fn check_suspend_tys<'tcx>(tcx: TyCtxt<'tcx>, layout: &GeneratorLayout<'tcx>, bo
debug!(?decl);
if !decl.ignore_for_traits && linted_tys.insert(decl.ty) {
- let Some(hir_id) = decl.source_info.scope.lint_root(&body.source_scopes) else { continue };
+ let Some(hir_id) = decl.source_info.scope.lint_root(&body.source_scopes) else {
+ continue;
+ };
check_must_not_suspend_ty(
tcx,
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index b6578cb25..fc9e18378 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -1,6 +1,7 @@
//! Inlining pass for MIR functions
use crate::deref_separator::deref_finder;
use rustc_attr::InlineAttr;
+use rustc_const_eval::transform::validate::validate_types;
use rustc_hir::def_id::DefId;
use rustc_index::bit_set::BitSet;
use rustc_index::Idx;
@@ -10,7 +11,7 @@ use rustc_middle::mir::*;
use rustc_middle::ty::TypeVisitableExt;
use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_session::config::OptLevel;
-use rustc_target::abi::{FieldIdx, FIRST_VARIANT};
+use rustc_target::abi::FieldIdx;
use rustc_target::spec::abi::Abi;
use crate::simplify::{remove_dead_blocks, CfgSimplifier};
@@ -105,7 +106,7 @@ struct Inliner<'tcx> {
/// Caller codegen attributes.
codegen_fn_attrs: &'tcx CodegenFnAttrs,
/// Stack of inlined instances.
- /// We only check the `DefId` and not the substs because we want to
+ /// We only check the `DefId` and not the args because we want to
/// avoid inlining cases of polymorphic recursion.
/// The number of `DefId`s is finite, so checking history is enough
/// to ensure that we do not loop endlessly while inlining.
@@ -200,6 +201,19 @@ impl<'tcx> Inliner<'tcx> {
return Err("failed to normalize callee body");
};
+ // Normally, this shouldn't be required, but trait normalization failure can create a
+ // validation ICE.
+ if !validate_types(
+ self.tcx,
+ MirPhase::Runtime(RuntimePhase::Optimized),
+ self.param_env,
+ &callee_body,
+ )
+ .is_empty()
+ {
+ return Err("failed to validate callee body");
+ }
+
// Check call signature compatibility.
// Normally, this shouldn't be required, but trait normalization failure can create a
// validation ICE.
@@ -209,19 +223,29 @@ impl<'tcx> Inliner<'tcx> {
return Err("failed to normalize return type");
}
if callsite.fn_sig.abi() == Abi::RustCall {
- let (arg_tuple, skipped_args) = match &args[..] {
- [arg_tuple] => (arg_tuple, 0),
- [_, arg_tuple] => (arg_tuple, 1),
+ // FIXME: Don't inline user-written `extern "rust-call"` functions,
+ // since this is generally perf-negative on rustc, and we hope that
+ // LLVM will inline these functions instead.
+ if callee_body.spread_arg.is_some() {
+ return Err("do not inline user-written rust-call functions");
+ }
+
+ let (self_arg, arg_tuple) = match &args[..] {
+ [arg_tuple] => (None, arg_tuple),
+ [self_arg, arg_tuple] => (Some(self_arg), arg_tuple),
_ => bug!("Expected `rust-call` to have 1 or 2 args"),
};
+ let self_arg_ty =
+ self_arg.map(|self_arg| self_arg.ty(&caller_body.local_decls, self.tcx));
+
let arg_tuple_ty = arg_tuple.ty(&caller_body.local_decls, self.tcx);
- let ty::Tuple(arg_tuple_tys) = arg_tuple_ty.kind() else {
+ let ty::Tuple(arg_tuple_tys) = *arg_tuple_ty.kind() else {
bug!("Closure arguments are not passed as a tuple");
};
for (arg_ty, input) in
- arg_tuple_tys.iter().zip(callee_body.args_iter().skip(skipped_args))
+ self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter())
{
let input_type = callee_body.local_decls[input].ty;
if !util::is_subtype(self.tcx, self.param_env, input_type, arg_ty) {
@@ -329,11 +353,11 @@ impl<'tcx> Inliner<'tcx> {
let terminator = bb_data.terminator();
if let TerminatorKind::Call { ref func, target, fn_span, .. } = terminator.kind {
let func_ty = func.ty(caller_body, self.tcx);
- if let ty::FnDef(def_id, substs) = *func_ty.kind() {
- // To resolve an instance its substs have to be fully normalized.
- let substs = self.tcx.try_normalize_erasing_regions(self.param_env, substs).ok()?;
+ if let ty::FnDef(def_id, args) = *func_ty.kind() {
+ // To resolve an instance, its args have to be fully normalized.
+ let args = self.tcx.try_normalize_erasing_regions(self.param_env, args).ok()?;
let callee =
- Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;
+ Instance::resolve(self.tcx, self.param_env, def_id, args).ok().flatten()?;
if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
return None;
@@ -343,7 +367,7 @@ impl<'tcx> Inliner<'tcx> {
return None;
}
- let fn_sig = self.tcx.fn_sig(def_id).subst(self.tcx, substs);
+ let fn_sig = self.tcx.fn_sig(def_id).instantiate(self.tcx, args);
let source_info = SourceInfo { span: fn_span, ..terminator.source_info };
return Some(CallSite { callee, fn_sig, block: bb, target, source_info });
@@ -368,7 +392,7 @@ impl<'tcx> Inliner<'tcx> {
// inlining. This is to ensure that the final crate doesn't have MIR that
// reference unexported symbols
if callsite.callee.def_id().is_local() {
- let is_generic = callsite.callee.substs.non_erasable_generics().next().is_some();
+ let is_generic = callsite.callee.args.non_erasable_generics().next().is_some();
if !is_generic && !callee_attrs.requests_inline() {
return Err("not exported");
}
@@ -437,13 +461,8 @@ impl<'tcx> Inliner<'tcx> {
instance: callsite.callee,
callee_body,
cost: 0,
- validation: Ok(()),
};
- for var_debug_info in callee_body.var_debug_info.iter() {
- checker.visit_var_debug_info(var_debug_info);
- }
-
// Traverse the MIR manually so we can account for the effects of inlining on the CFG.
let mut work_list = vec![START_BLOCK];
let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
@@ -480,9 +499,6 @@ impl<'tcx> Inliner<'tcx> {
}
}
- // Abort if type validation found anything fishy.
- checker.validation?;
-
// N.B. We still apply our cost threshold to #[inline(always)] functions.
// That attribute is often applied to very large functions that exceed LLVM's (very
// generous) inlining threshold. Such functions are very poor MIR inlining candidates.
@@ -774,11 +790,10 @@ struct CostChecker<'b, 'tcx> {
cost: usize,
callee_body: &'b Body<'tcx>,
instance: ty::Instance<'tcx>,
- validation: Result<(), &'static str>,
}
impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
- fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
// Don't count StorageLive/StorageDead in the inlining cost.
match statement.kind {
StatementKind::StorageLive(_)
@@ -787,11 +802,9 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
| StatementKind::Nop => {}
_ => self.cost += INSTR_COST,
}
-
- self.super_statement(statement, location);
}
- fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
let tcx = self.tcx;
match terminator.kind {
TerminatorKind::Drop { ref place, unwind, .. } => {
@@ -835,109 +848,6 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
}
_ => self.cost += INSTR_COST,
}
-
- self.super_terminator(terminator, location);
- }
-
- /// This method duplicates code from MIR validation in an attempt to detect type mismatches due
- /// to normalization failure.
- fn visit_projection_elem(
- &mut self,
- place_ref: PlaceRef<'tcx>,
- elem: PlaceElem<'tcx>,
- context: PlaceContext,
- location: Location,
- ) {
- if let ProjectionElem::Field(f, ty) = elem {
- let parent_ty = place_ref.ty(&self.callee_body.local_decls, self.tcx);
- let check_equal = |this: &mut Self, f_ty| {
- // Fast path if there is nothing to substitute.
- if ty == f_ty {
- return;
- }
- let ty = this.instance.subst_mir(this.tcx, ty::EarlyBinder::bind(&ty));
- let f_ty = this.instance.subst_mir(this.tcx, ty::EarlyBinder::bind(&f_ty));
- if ty == f_ty {
- return;
- }
- if !util::is_subtype(this.tcx, this.param_env, ty, f_ty) {
- trace!(?ty, ?f_ty);
- this.validation = Err("failed to normalize projection type");
- return;
- }
- };
-
- let kind = match parent_ty.ty.kind() {
- &ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
- self.tcx.type_of(def_id).subst(self.tcx, substs).kind()
- }
- kind => kind,
- };
-
- match kind {
- ty::Tuple(fields) => {
- let Some(f_ty) = fields.get(f.as_usize()) else {
- self.validation = Err("malformed MIR");
- return;
- };
- check_equal(self, *f_ty);
- }
- ty::Adt(adt_def, substs) => {
- let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT);
- let Some(field) = adt_def.variant(var).fields.get(f) else {
- self.validation = Err("malformed MIR");
- return;
- };
- check_equal(self, field.ty(self.tcx, substs));
- }
- ty::Closure(_, substs) => {
- let substs = substs.as_closure();
- let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
- self.validation = Err("malformed MIR");
- return;
- };
- check_equal(self, f_ty);
- }
- &ty::Generator(def_id, substs, _) => {
- let f_ty = if let Some(var) = parent_ty.variant_index {
- let gen_body = if def_id == self.callee_body.source.def_id() {
- self.callee_body
- } else {
- self.tcx.optimized_mir(def_id)
- };
-
- let Some(layout) = gen_body.generator_layout() else {
- self.validation = Err("malformed MIR");
- return;
- };
-
- let Some(&local) = layout.variant_fields[var].get(f) else {
- self.validation = Err("malformed MIR");
- return;
- };
-
- let Some(f_ty) = layout.field_tys.get(local) else {
- self.validation = Err("malformed MIR");
- return;
- };
-
- f_ty.ty
- } else {
- let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
- self.validation = Err("malformed MIR");
- return;
- };
-
- f_ty
- };
-
- check_equal(self, f_ty);
- }
- _ => self.validation = Err("malformed MIR"),
- }
- }
-
- self.super_projection_elem(place_ref, elem, context, location);
}
}
@@ -1143,10 +1053,10 @@ fn try_instance_mir<'tcx>(
) -> Result<&'tcx Body<'tcx>, &'static str> {
match instance {
ty::InstanceDef::DropGlue(_, Some(ty)) => match ty.kind() {
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
let fields = def.all_fields();
for field in fields {
- let field_ty = field.ty(tcx, substs);
+ let field_ty = field.ty(tcx, args);
if field_ty.has_param() && field_ty.has_projections() {
return Err("cannot build drop shim for polymorphic type");
}
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 8a10445f8..822634129 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -3,7 +3,7 @@ use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::mir::TerminatorKind;
use rustc_middle::ty::TypeVisitableExt;
-use rustc_middle::ty::{self, subst::SubstsRef, InstanceDef, TyCtxt};
+use rustc_middle::ty::{self, GenericArgsRef, InstanceDef, TyCtxt};
use rustc_session::Limit;
// FIXME: check whether it is cheaper to precompute the entire call graph instead of invoking
@@ -43,16 +43,16 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
recursion_limit: Limit,
) -> bool {
trace!(%caller);
- for &(callee, substs) in tcx.mir_inliner_callees(caller.def) {
- let Ok(substs) = caller.try_subst_mir_and_normalize_erasing_regions(
+ for &(callee, args) in tcx.mir_inliner_callees(caller.def) {
+ let Ok(args) = caller.try_subst_mir_and_normalize_erasing_regions(
tcx,
param_env,
- ty::EarlyBinder::bind(substs),
+ ty::EarlyBinder::bind(args),
) else {
- trace!(?caller, ?param_env, ?substs, "cannot normalize, skipping");
+ trace!(?caller, ?param_env, ?args, "cannot normalize, skipping");
continue;
};
- let Ok(Some(callee)) = ty::Instance::resolve(tcx, param_env, callee, substs) else {
+ let Ok(Some(callee)) = ty::Instance::resolve(tcx, param_env, callee, args) else {
trace!(?callee, "cannot resolve, skipping");
continue;
};
@@ -147,7 +147,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
pub(crate) fn mir_inliner_callees<'tcx>(
tcx: TyCtxt<'tcx>,
instance: ty::InstanceDef<'tcx>,
-) -> &'tcx [(DefId, SubstsRef<'tcx>)] {
+) -> &'tcx [(DefId, GenericArgsRef<'tcx>)] {
let steal;
let guard;
let body = match (instance, instance.def_id().as_local()) {
@@ -165,7 +165,7 @@ pub(crate) fn mir_inliner_callees<'tcx>(
if let TerminatorKind::Call { func, .. } = &terminator.kind {
let ty = func.ty(&body.local_decls, tcx);
let call = match ty.kind() {
- ty::FnDef(def_id, substs) => (*def_id, *substs),
+ ty::FnDef(def_id, args) => (*def_id, *args),
_ => continue,
};
calls.insert(call);
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index e4dc61762..8b0a0903d 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -5,7 +5,7 @@ use crate::MirPass;
use rustc_hir::Mutability;
use rustc_middle::mir::*;
use rustc_middle::ty::layout::ValidityRequirement;
-use rustc_middle::ty::{self, ParamEnv, SubstsRef, Ty, TyCtxt};
+use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt};
use rustc_span::symbol::Symbol;
use rustc_target::abi::FieldIdx;
@@ -57,7 +57,7 @@ struct InstSimplifyContext<'tcx, 'a> {
impl<'tcx> InstSimplifyContext<'tcx, '_> {
fn should_simplify(&self, source_info: &SourceInfo, rvalue: &Rvalue<'tcx>) -> bool {
self.tcx.consider_optimizing(|| {
- format!("InstSimplify - Rvalue: {:?} SourceInfo: {:?}", rvalue, source_info)
+ format!("InstSimplify - Rvalue: {rvalue:?} SourceInfo: {source_info:?}")
})
}
@@ -163,14 +163,14 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
}
// Transmuting a transparent struct/union to a field's type is a projection
- if let ty::Adt(adt_def, substs) = operand_ty.kind()
+ if let ty::Adt(adt_def, args) = operand_ty.kind()
&& adt_def.repr().transparent()
&& (adt_def.is_struct() || adt_def.is_union())
&& let Some(place) = operand.place()
{
let variant = adt_def.non_enum_variant();
for (i, field) in variant.fields.iter().enumerate() {
- let field_ty = field.ty(self.tcx, substs);
+ let field_ty = field.ty(self.tcx, args);
if field_ty == *cast_ty {
let place = place.project_deeper(&[ProjectionElem::Field(FieldIdx::from_usize(i), *cast_ty)], self.tcx);
let operand = if operand.is_move() { Operand::Move(place) } else { Operand::Copy(place) };
@@ -189,22 +189,22 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
statements: &mut Vec<Statement<'tcx>>,
) {
let TerminatorKind::Call { func, args, destination, target, .. } = &mut terminator.kind
- else { return };
+ else {
+ return;
+ };
// It's definitely not a clone if there are multiple arguments
if args.len() != 1 {
return;
}
- let Some(destination_block) = *target
- else { return };
+ let Some(destination_block) = *target else { return };
// Only bother looking more if it's easy to know what we're calling
- let Some((fn_def_id, fn_substs)) = func.const_fn_def()
- else { return };
+ let Some((fn_def_id, fn_args)) = func.const_fn_def() else { return };
// Clone needs one subst, so we can cheaply rule out other stuff
- if fn_substs.len() != 1 {
+ if fn_args.len() != 1 {
return;
}
@@ -212,8 +212,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
// doing DefId lookups to figure out what we're actually calling.
let arg_ty = args[0].ty(self.local_decls, self.tcx);
- let ty::Ref(_region, inner_ty, Mutability::Not) = *arg_ty.kind()
- else { return };
+ let ty::Ref(_region, inner_ty, Mutability::Not) = *arg_ty.kind() else { return };
if !inner_ty.is_trivially_pure_clone_copy() {
return;
@@ -227,15 +226,14 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
if !self.tcx.consider_optimizing(|| {
format!(
"InstSimplify - Call: {:?} SourceInfo: {:?}",
- (fn_def_id, fn_substs),
+ (fn_def_id, fn_args),
terminator.source_info
)
}) {
return;
}
- let Some(arg_place) = args.pop().unwrap().place()
- else { return };
+ let Some(arg_place) = args.pop().unwrap().place() else { return };
statements.push(Statement {
source_info: terminator.source_info,
@@ -254,17 +252,21 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
terminator: &mut Terminator<'tcx>,
_statements: &mut Vec<Statement<'tcx>>,
) {
- let TerminatorKind::Call { func, target, .. } = &mut terminator.kind else { return; };
- let Some(target_block) = target else { return; };
+ let TerminatorKind::Call { func, target, .. } = &mut terminator.kind else {
+ return;
+ };
+ let Some(target_block) = target else {
+ return;
+ };
let func_ty = func.ty(self.local_decls, self.tcx);
- let Some((intrinsic_name, substs)) = resolve_rust_intrinsic(self.tcx, func_ty) else {
+ let Some((intrinsic_name, args)) = resolve_rust_intrinsic(self.tcx, func_ty) else {
return;
};
// The intrinsics we are interested in have one generic parameter
- if substs.is_empty() {
+ if args.is_empty() {
return;
}
- let ty = substs.type_at(0);
+ let ty = args.type_at(0);
let known_is_valid = intrinsic_assert_panics(self.tcx, self.param_env, ty, intrinsic_name);
match known_is_valid {
@@ -295,10 +297,10 @@ fn intrinsic_assert_panics<'tcx>(
fn resolve_rust_intrinsic<'tcx>(
tcx: TyCtxt<'tcx>,
func_ty: Ty<'tcx>,
-) -> Option<(Symbol, SubstsRef<'tcx>)> {
- if let ty::FnDef(def_id, substs) = *func_ty.kind() {
+) -> Option<(Symbol, GenericArgsRef<'tcx>)> {
+ if let ty::FnDef(def_id, args) = *func_ty.kind() {
if tcx.is_intrinsic(def_id) {
- return Some((tcx.item_name(def_id), substs));
+ return Some((tcx.item_name(def_id), args));
}
}
None
diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs
index 8ed4706e1..19108dabd 100644
--- a/compiler/rustc_mir_transform/src/large_enums.rs
+++ b/compiler/rustc_mir_transform/src/large_enums.rs
@@ -48,7 +48,7 @@ impl EnumSizeOpt {
alloc_cache: &mut FxHashMap<Ty<'tcx>, AllocId>,
) -> Option<(AdtDef<'tcx>, usize, AllocId)> {
let adt_def = match ty.kind() {
- ty::Adt(adt_def, _substs) if adt_def.is_enum() => adt_def,
+ ty::Adt(adt_def, _args) if adt_def.is_enum() => adt_def,
_ => return None,
};
let layout = tcx.layout_of(param_env.and(ty)).ok()?;
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index fa8257cf9..bf798adee 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -75,7 +75,7 @@ mod errors;
mod ffi_unwind_calls;
mod function_item_references;
mod generator;
-mod inline;
+pub mod inline;
mod instsimplify;
mod large_enums;
mod lower_intrinsics;
@@ -338,38 +338,16 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
return shim::build_adt_ctor(tcx, def.to_def_id());
}
- let context = tcx
- .hir()
- .body_const_context(def)
- .expect("mir_for_ctfe should not be used for runtime functions");
-
- let body = tcx.mir_drops_elaborated_and_const_checked(def).borrow().clone();
+ let body = tcx.mir_drops_elaborated_and_const_checked(def);
+ let body = match tcx.hir().body_const_context(def) {
+ // consts and statics do not have `optimized_mir`, so we can steal the body instead of
+ // cloning it.
+ Some(hir::ConstContext::Const | hir::ConstContext::Static(_)) => body.steal(),
+ Some(hir::ConstContext::ConstFn) => body.borrow().clone(),
+ None => bug!("`mir_for_ctfe` called on non-const {def:?}"),
+ };
let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::Const);
-
- match context {
- // Do not const prop functions, either they get executed at runtime or exported to metadata,
- // so we run const prop on them, or they don't, in which case we const evaluate some control
- // flow paths of the function and any errors in those paths will get emitted as const eval
- // errors.
- hir::ConstContext::ConstFn => {}
- // Static items always get evaluated, so we can just let const eval see if any erroneous
- // control flow paths get executed.
- hir::ConstContext::Static(_) => {}
- // Associated constants get const prop run so we detect common failure situations in the
- // crate that defined the constant.
- // Technically we want to not run on regular const items, but oli-obk doesn't know how to
- // conveniently detect that at this point without looking at the HIR.
- hir::ConstContext::Const => {
- pm::run_passes(
- tcx,
- &mut body,
- &[&const_prop::ConstProp],
- Some(MirPhase::Runtime(RuntimePhase::Optimized)),
- );
- }
- }
-
pm::run_passes(tcx, &mut body, &[&ctfe_limit::CtfeLimit], None);
body
@@ -446,10 +424,16 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &
run_analysis_to_runtime_passes(tcx, &mut body);
+ // Now that drop elaboration has been performed, we can check for
+ // unconditional drop recursion.
+ rustc_mir_build::lints::check_drop_recursion(tcx, &body);
+
tcx.alloc_steal_mir(body)
}
-fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
+// by custom rustc drivers that run all of these steps themselves.
+pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
assert!(body.phase == MirPhase::Analysis(AnalysisPhase::Initial));
let did = body.source.def_id();
@@ -553,6 +537,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
&normalize_array_len::NormalizeArrayLen, // has to run after `slice::len` lowering
&const_goto::ConstGoto,
&remove_unneeded_drops::RemoveUnneededDrops,
+ &ref_prop::ReferencePropagation,
&sroa::ScalarReplacementOfAggregates,
&match_branches::MatchBranchSimplification,
// inst combine is after MatchBranchSimplification to clean up Ne(_1, false)
@@ -560,7 +545,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
&instsimplify::InstSimplify,
&simplify::SimplifyLocals::BeforeConstProp,
&copy_prop::CopyProp,
- &ref_prop::ReferencePropagation,
// Perform `SeparateConstSwitch` after SSA-based analyses, as cloning blocks may
// destroy the SSA property. It should still happen before const-propagation, so the
// latter pass will leverage the created opportunities.
@@ -615,7 +599,7 @@ fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
// computes and caches its result.
Some(hir::ConstContext::ConstFn) => tcx.ensure_with_value().mir_for_ctfe(did),
None => {}
- Some(other) => panic!("do not use `optimized_mir` for constants: {:?}", other),
+ Some(other) => panic!("do not use `optimized_mir` for constants: {other:?}"),
}
debug!("about to call mir_drops_elaborated...");
let body = tcx.mir_drops_elaborated_and_const_checked(did).steal();
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index ce98e9b0c..fc36c6e41 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -2,7 +2,7 @@
use crate::{errors, MirPass};
use rustc_middle::mir::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_span::Span;
@@ -19,7 +19,8 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
&mut terminator.kind
{
let func_ty = func.ty(local_decls, tcx);
- let Some((intrinsic_name, substs)) = resolve_rust_intrinsic(tcx, func_ty) else {
+ let Some((intrinsic_name, generic_args)) = resolve_rust_intrinsic(tcx, func_ty)
+ else {
continue;
};
match intrinsic_name {
@@ -149,7 +150,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
}
sym::size_of | sym::min_align_of => {
if let Some(target) = *target {
- let tp_ty = substs.type_at(0);
+ let tp_ty = generic_args.type_at(0);
let null_op = match intrinsic_name {
sym::size_of => NullOp::SizeOf,
sym::min_align_of => NullOp::AlignOf,
@@ -251,7 +252,9 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
if let (Some(target), Some(arg)) = (*target, args[0].place()) {
let ty::RawPtr(ty::TypeAndMut { ty: dest_ty, .. }) =
destination.ty(local_decls, tcx).ty.kind()
- else { bug!(); };
+ else {
+ bug!();
+ };
block.statements.push(Statement {
source_info: terminator.source_info,
@@ -302,7 +305,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
terminator.kind = TerminatorKind::Unreachable;
}
}
- _ if intrinsic_name.as_str().starts_with("simd_shuffle") => {
+ sym::simd_shuffle => {
validate_simd_shuffle(tcx, args, terminator.source_info.span);
}
_ => {}
@@ -315,10 +318,10 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
fn resolve_rust_intrinsic<'tcx>(
tcx: TyCtxt<'tcx>,
func_ty: Ty<'tcx>,
-) -> Option<(Symbol, SubstsRef<'tcx>)> {
- if let ty::FnDef(def_id, substs) = *func_ty.kind() {
+) -> Option<(Symbol, GenericArgsRef<'tcx>)> {
+ if let ty::FnDef(def_id, args) = *func_ty.kind() {
if tcx.is_intrinsic(def_id) {
- return Some((tcx.item_name(def_id), substs));
+ return Some((tcx.item_name(def_id), args));
}
}
None
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs
index 6eb484982..bc29fb8de 100644
--- a/compiler/rustc_mir_transform/src/match_branches.rs
+++ b/compiler/rustc_mir_transform/src/match_branches.rs
@@ -51,7 +51,7 @@ impl<'tcx> MirPass<'tcx> for MatchBranchSimplification {
let bbs = body.basic_blocks.as_mut();
let mut should_cleanup = false;
'outer: for bb_idx in bbs.indices() {
- if !tcx.consider_optimizing(|| format!("MatchBranchSimplification {:?} ", def_id)) {
+ if !tcx.consider_optimizing(|| format!("MatchBranchSimplification {def_id:?} ")) {
continue;
}
diff --git a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
index 3957cd92c..c97d03454 100644
--- a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
+++ b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
@@ -27,7 +27,7 @@ impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators {
}
for bb in bbs {
- if !tcx.consider_optimizing(|| format!("MultipleReturnTerminators {:?} ", def_id)) {
+ if !tcx.consider_optimizing(|| format!("MultipleReturnTerminators {def_id:?} ")) {
break;
}
diff --git a/compiler/rustc_mir_transform/src/nrvo.rs b/compiler/rustc_mir_transform/src/nrvo.rs
index 5ce96012b..e1298b065 100644
--- a/compiler/rustc_mir_transform/src/nrvo.rs
+++ b/compiler/rustc_mir_transform/src/nrvo.rs
@@ -45,7 +45,7 @@ impl<'tcx> MirPass<'tcx> for RenameReturnPlace {
return;
};
- if !tcx.consider_optimizing(|| format!("RenameReturnPlace {:?}", def_id)) {
+ if !tcx.consider_optimizing(|| format!("RenameReturnPlace {def_id:?}")) {
return;
}
diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs
index 710eed3ed..057f5fe82 100644
--- a/compiler/rustc_mir_transform/src/pass_manager.rs
+++ b/compiler/rustc_mir_transform/src/pass_manager.rs
@@ -118,7 +118,7 @@ fn run_passes_inner<'tcx>(
dump_mir_for_pass(tcx, body, &name, false);
}
if validate {
- validate_body(tcx, body, format!("before pass {}", name));
+ validate_body(tcx, body, format!("before pass {name}"));
}
tcx.sess.time(name, || pass.run_pass(tcx, body));
@@ -127,7 +127,7 @@ fn run_passes_inner<'tcx>(
dump_mir_for_pass(tcx, body, &name, true);
}
if validate {
- validate_body(tcx, body, format!("after pass {}", name));
+ validate_body(tcx, body, format!("after pass {name}"));
}
body.pass_count += 1;
diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs
index bbd9f76ba..49a940b57 100644
--- a/compiler/rustc_mir_transform/src/ref_prop.rs
+++ b/compiler/rustc_mir_transform/src/ref_prop.rs
@@ -71,7 +71,7 @@ pub struct ReferencePropagation;
impl<'tcx> MirPass<'tcx> for ReferencePropagation {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
- sess.mir_opt_level() >= 4
+ sess.mir_opt_level() >= 2
}
#[instrument(level = "trace", skip(self, tcx, body))]
@@ -265,7 +265,6 @@ fn compute_replacement<'tcx>(
targets,
storage_to_remove,
allowed_replacements,
- fully_replacable_locals,
any_replacement: false,
};
@@ -346,7 +345,6 @@ struct Replacer<'tcx> {
storage_to_remove: BitSet<Local>,
allowed_replacements: FxHashSet<(Local, Location)>,
any_replacement: bool,
- fully_replacable_locals: BitSet<Local>,
}
impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
@@ -355,7 +353,10 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
}
fn visit_var_debug_info(&mut self, debuginfo: &mut VarDebugInfo<'tcx>) {
- if let VarDebugInfoContents::Place(ref mut place) = debuginfo.value
+ // If the debuginfo is a pointer to another place:
+ // - if it's a reborrow, see through it;
+ // - if it's a direct borrow, stop.
+ while let VarDebugInfoContents::Place(ref mut place) = debuginfo.value
&& place.projection.is_empty()
&& let Value::Pointer(target, _) = self.targets[place.local]
&& target.projection.iter().all(|p| p.can_use_in_debuginfo())
@@ -363,34 +364,37 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
if let Some((&PlaceElem::Deref, rest)) = target.projection.split_last() {
*place = Place::from(target.local).project_deeper(rest, self.tcx);
self.any_replacement = true;
- } else if self.fully_replacable_locals.contains(place.local)
- && let Some(references) = debuginfo.references.checked_add(1)
- {
- debuginfo.references = references;
- *place = target;
- self.any_replacement = true;
+ } else {
+ break
}
}
+
+ // Simplify any projections left inside `debuginfo`.
+ self.super_var_debug_info(debuginfo);
}
fn visit_place(&mut self, place: &mut Place<'tcx>, ctxt: PlaceContext, loc: Location) {
- if place.projection.first() != Some(&PlaceElem::Deref) {
- return;
- }
-
loop {
- if let Value::Pointer(target, _) = self.targets[place.local] {
- let perform_opt = matches!(ctxt, PlaceContext::NonUse(_))
- || self.allowed_replacements.contains(&(target.local, loc));
-
- if perform_opt {
- *place = target.project_deeper(&place.projection[1..], self.tcx);
- self.any_replacement = true;
- continue;
+ if place.projection.first() != Some(&PlaceElem::Deref) {
+ return;
+ }
+
+ let Value::Pointer(target, _) = self.targets[place.local] else { return };
+
+ let perform_opt = match ctxt {
+ PlaceContext::NonUse(NonUseContext::VarDebugInfo) => {
+ target.projection.iter().all(|p| p.can_use_in_debuginfo())
}
+ PlaceContext::NonUse(_) => true,
+ _ => self.allowed_replacements.contains(&(target.local, loc)),
+ };
+
+ if !perform_opt {
+ return;
}
- break;
+ *place = target.project_deeper(&place.projection[1..], self.tcx);
+ self.any_replacement = true;
}
}
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 4941c9edc..4e85c76fb 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -6,8 +6,8 @@ use rustc_middle::ty::TyCtxt;
use rustc_target::spec::PanicStrategy;
/// A pass that removes noop landing pads and replaces jumps to them with
-/// `None`. This is important because otherwise LLVM generates terrible
-/// code for these.
+/// `UnwindAction::Continue`. This is important because otherwise LLVM generates
+/// terrible code for these.
pub struct RemoveNoopLandingPads;
impl<'tcx> MirPass<'tcx> for RemoveNoopLandingPads {
@@ -84,7 +84,17 @@ impl RemoveNoopLandingPads {
fn remove_nop_landing_pads(&self, body: &mut Body<'_>) {
debug!("body: {:#?}", body);
- // make sure there's a resume block
+ // Skip the pass if there are no blocks with a resume terminator.
+ let has_resume = body
+ .basic_blocks
+ .iter_enumerated()
+ .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::Resume));
+ if !has_resume {
+ debug!("remove_noop_landing_pads: no resume block in MIR");
+ return;
+ }
+
+ // make sure there's a resume block without any statements
let resume_block = {
let mut patch = MirPatch::new(body);
let resume_block = patch.resume_block();
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index 283931de0..263849747 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -1,10 +1,12 @@
use rustc_index::bit_set::ChunkedBitSet;
use rustc_middle::mir::{Body, TerminatorKind};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt, VariantDef};
use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
-use rustc_mir_dataflow::{self, move_path_children_matching, Analysis, MoveDataParamEnv};
+use rustc_mir_dataflow::{
+ self, move_path_children_matching, Analysis, MaybeReachable, MoveDataParamEnv,
+};
use rustc_target::abi::FieldIdx;
use crate::MirPass;
@@ -38,10 +40,10 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
let mut to_remove = vec![];
for (bb, block) in body.basic_blocks.iter_enumerated() {
let terminator = block.terminator();
- let TerminatorKind::Drop { place, .. } = &terminator.kind
- else { continue };
+ let TerminatorKind::Drop { place, .. } = &terminator.kind else { continue };
maybe_inits.seek_before_primary_effect(body.terminator_loc(bb));
+ let MaybeReachable::Reachable(maybe_inits) = maybe_inits.get() else { continue };
// If there's no move path for the dropped place, it's probably a `Deref`. Let it alone.
let LookupResult::Exact(mpi) = mdpe.move_data.rev_lookup.find(place.as_ref()) else {
@@ -51,7 +53,7 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
let should_keep = is_needs_drop_and_init(
tcx,
param_env,
- maybe_inits.get(),
+ maybe_inits,
&mdpe.move_data,
place.ty(body, tcx).ty,
mpi,
@@ -64,9 +66,9 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
for bb in to_remove {
let block = &mut body.basic_blocks_mut()[bb];
- let TerminatorKind::Drop { target, .. }
- = &block.terminator().kind
- else { unreachable!() };
+ let TerminatorKind::Drop { target, .. } = &block.terminator().kind else {
+ unreachable!()
+ };
// Replace block terminator with `Goto`.
block.terminator_mut().kind = TerminatorKind::Goto { target: *target };
@@ -99,7 +101,7 @@ fn is_needs_drop_and_init<'tcx>(
// This pass is only needed for const-checking, so it doesn't handle as many cases as
// `DropCtxt::open_drop`, since they aren't relevant in a const-context.
match ty.kind() {
- ty::Adt(adt, substs) => {
+ ty::Adt(adt, args) => {
let dont_elaborate = adt.is_union() || adt.is_manually_drop() || adt.has_dtor(tcx);
if dont_elaborate {
return true;
@@ -119,7 +121,7 @@ fn is_needs_drop_and_init<'tcx>(
let downcast =
move_path_children_matching(move_data, mpi, |x| x.is_downcast_to(vid));
let Some(dc_mpi) = downcast else {
- return variant_needs_drop(tcx, param_env, substs, variant);
+ return variant_needs_drop(tcx, param_env, args, variant);
};
dc_mpi
@@ -131,7 +133,7 @@ fn is_needs_drop_and_init<'tcx>(
.fields
.iter()
.enumerate()
- .map(|(f, field)| (FieldIdx::from_usize(f), field.ty(tcx, substs), mpi))
+ .map(|(f, field)| (FieldIdx::from_usize(f), field.ty(tcx, args), mpi))
.any(field_needs_drop_and_init)
})
}
@@ -149,11 +151,11 @@ fn is_needs_drop_and_init<'tcx>(
fn variant_needs_drop<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
variant: &VariantDef,
) -> bool {
variant.fields.iter().any(|field| {
- let f_ty = field.ty(tcx, substs);
+ let f_ty = field.ty(tcx, args);
f_ty.needs_drop(tcx, param_env)
})
}
diff --git a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
index 84ccf6e1f..08b2a6537 100644
--- a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
@@ -27,7 +27,7 @@ impl<'tcx> MirPass<'tcx> for RemoveUnneededDrops {
if ty.ty.needs_drop(tcx, param_env) {
continue;
}
- if !tcx.consider_optimizing(|| format!("RemoveUnneededDrops {:?} ", did)) {
+ if !tcx.consider_optimizing(|| format!("RemoveUnneededDrops {did:?} ")) {
continue;
}
debug!("SUCCESS: replacing `drop` with goto({:?})", target);
diff --git a/compiler/rustc_mir_transform/src/remove_zsts.rs b/compiler/rustc_mir_transform/src/remove_zsts.rs
index 1f37f03cf..9c6c55b08 100644
--- a/compiler/rustc_mir_transform/src/remove_zsts.rs
+++ b/compiler/rustc_mir_transform/src/remove_zsts.rs
@@ -15,7 +15,7 @@ impl<'tcx> MirPass<'tcx> for RemoveZsts {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// Avoid query cycles (generators require optimized MIR for layout).
- if tcx.type_of(body.source.def_id()).subst_identity().is_generator() {
+ if tcx.type_of(body.source.def_id()).instantiate_identity().is_generator() {
return;
}
let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
@@ -102,7 +102,7 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
let op_ty = operand.ty(self.local_decls, self.tcx);
if self.known_to_be_zst(op_ty)
&& self.tcx.consider_optimizing(|| {
- format!("RemoveZsts - Operand: {:?} Location: {:?}", operand, loc)
+ format!("RemoveZsts - Operand: {operand:?} Location: {loc:?}")
})
{
*operand = Operand::Constant(Box::new(self.make_zst(op_ty)))
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index b176db3c9..223dc59c6 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -3,8 +3,8 @@ use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir::*;
use rustc_middle::query::Providers;
-use rustc_middle::ty::InternalSubsts;
-use rustc_middle::ty::{self, EarlyBinder, GeneratorSubsts, Ty, TyCtxt};
+use rustc_middle::ty::GenericArgs;
+use rustc_middle::ty::{self, EarlyBinder, GeneratorArgs, Ty, TyCtxt};
use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT};
use rustc_index::{Idx, IndexVec};
@@ -69,10 +69,19 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
ty::InstanceDef::DropGlue(def_id, ty) => {
// FIXME(#91576): Drop shims for generators aren't subject to the MIR passes at the end
// of this function. Is this intentional?
- if let Some(ty::Generator(gen_def_id, substs, _)) = ty.map(Ty::kind) {
+ if let Some(ty::Generator(gen_def_id, args, _)) = ty.map(Ty::kind) {
let body = tcx.optimized_mir(*gen_def_id).generator_drop().unwrap();
- let body = EarlyBinder::bind(body.clone()).subst(tcx, substs);
+ let mut body = EarlyBinder::bind(body.clone()).instantiate(tcx, args);
debug!("make_shim({:?}) = {:?}", instance, body);
+
+ // Run empty passes to mark the phase change and perform validation.
+ pm::run_passes(
+ tcx,
+ &mut body,
+ &[],
+ Some(MirPhase::Runtime(RuntimePhase::Optimized)),
+ );
+
return body;
}
@@ -160,12 +169,12 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>)
assert!(!matches!(ty, Some(ty) if ty.is_generator()));
- let substs = if let Some(ty) = ty {
- tcx.mk_substs(&[ty.into()])
+ let args = if let Some(ty) = ty {
+ tcx.mk_args(&[ty.into()])
} else {
- InternalSubsts::identity_for_item(tcx, def_id)
+ GenericArgs::identity_for_item(tcx, def_id)
};
- let sig = tcx.fn_sig(def_id).subst(tcx, substs);
+ let sig = tcx.fn_sig(def_id).instantiate(tcx, args);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
@@ -377,12 +386,10 @@ fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -
match self_ty.kind() {
_ if is_copy => builder.copy_shim(),
- ty::Closure(_, substs) => {
- builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys())
- }
+ ty::Closure(_, args) => builder.tuple_like_shim(dest, src, args.as_closure().upvar_tys()),
ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
- ty::Generator(gen_def_id, substs, hir::Movability::Movable) => {
- builder.generator_shim(dest, src, *gen_def_id, substs.as_generator())
+ ty::Generator(gen_def_id, args, hir::Movability::Movable) => {
+ builder.generator_shim(dest, src, *gen_def_id, args.as_generator())
}
_ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
};
@@ -404,7 +411,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
// we must subst the self_ty because it's
// otherwise going to be TySelf and we can't index
// or access fields of a Place of type TySelf.
- let sig = tcx.fn_sig(def_id).subst(tcx, &[self_ty.into()]);
+ let sig = tcx.fn_sig(def_id).instantiate(tcx, &[self_ty.into()]);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
@@ -587,17 +594,17 @@ impl<'tcx> CloneShimBuilder<'tcx> {
dest: Place<'tcx>,
src: Place<'tcx>,
gen_def_id: DefId,
- substs: GeneratorSubsts<'tcx>,
+ args: GeneratorArgs<'tcx>,
) {
self.block(vec![], TerminatorKind::Goto { target: self.block_index_offset(3) }, false);
let unwind = self.block(vec![], TerminatorKind::Resume, true);
// This will get overwritten with a switch once we know the target blocks
let switch = self.block(vec![], TerminatorKind::Unreachable, false);
- let unwind = self.clone_fields(dest, src, switch, unwind, substs.upvar_tys());
+ let unwind = self.clone_fields(dest, src, switch, unwind, args.upvar_tys());
let target = self.block(vec![], TerminatorKind::Return, false);
let unreachable = self.block(vec![], TerminatorKind::Unreachable, false);
- let mut cases = Vec::with_capacity(substs.state_tys(gen_def_id, self.tcx).count());
- for (index, state_tys) in substs.state_tys(gen_def_id, self.tcx).enumerate() {
+ let mut cases = Vec::with_capacity(args.state_tys(gen_def_id, self.tcx).count());
+ for (index, state_tys) in args.state_tys(gen_def_id, self.tcx).enumerate() {
let variant_index = VariantIdx::new(index);
let dest = self.tcx.mk_place_downcast_unnamed(dest, variant_index);
let src = self.tcx.mk_place_downcast_unnamed(src, variant_index);
@@ -613,7 +620,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
cases.push((index as u128, start_block));
let _final_cleanup_block = self.clone_fields(dest, src, target, unwind, state_tys);
}
- let discr_ty = substs.discr_ty(self.tcx);
+ let discr_ty = args.discr_ty(self.tcx);
let temp = self.make_place(Mutability::Mut, discr_ty);
let rvalue = Rvalue::Discriminant(src);
let statement = self.make_statement(StatementKind::Assign(Box::new((temp, rvalue))));
@@ -642,7 +649,7 @@ fn build_call_shim<'tcx>(
// `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
// to substitute into the signature of the shim. It is not necessary for users of this
// MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`).
- let (sig_substs, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
+ let (sig_args, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
let sig = tcx.erase_late_bound_regions(ty.fn_sig(tcx));
let untuple_args = sig.inputs();
@@ -659,11 +666,11 @@ fn build_call_shim<'tcx>(
let sig = tcx.fn_sig(def_id);
let sig = sig.map_bound(|sig| tcx.erase_late_bound_regions(sig));
- assert_eq!(sig_substs.is_some(), !instance.has_polymorphic_mir_body());
- let mut sig = if let Some(sig_substs) = sig_substs {
- sig.subst(tcx, &sig_substs)
+ assert_eq!(sig_args.is_some(), !instance.has_polymorphic_mir_body());
+ let mut sig = if let Some(sig_args) = sig_args {
+ sig.instantiate(tcx, &sig_args)
} else {
- sig.subst_identity()
+ sig.instantiate_identity()
};
if let CallKind::Indirect(fnty) = call_kind {
@@ -751,7 +758,7 @@ fn build_call_shim<'tcx>(
// `FnDef` call with optional receiver.
CallKind::Direct(def_id) => {
- let ty = tcx.type_of(def_id).subst_identity();
+ let ty = tcx.type_of(def_id).instantiate_identity();
(
Operand::Constant(Box::new(Constant {
span,
@@ -868,12 +875,12 @@ pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> {
// Normalize the sig.
let sig = tcx
.fn_sig(ctor_id)
- .subst_identity()
+ .instantiate_identity()
.no_bound_vars()
.expect("LBR in ADT constructor signature");
let sig = tcx.normalize_erasing_regions(param_env, sig);
- let ty::Adt(adt_def, substs) = sig.output().kind() else {
+ let ty::Adt(adt_def, args) = sig.output().kind() else {
bug!("unexpected type for ADT ctor {:?}", sig.output());
};
@@ -896,7 +903,7 @@ pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> {
// return;
debug!("build_ctor: variant_index={:?}", variant_index);
- let kind = AggregateKind::Adt(adt_def.did(), variant_index, substs, None, None);
+ let kind = AggregateKind::Adt(adt_def.did(), variant_index, args, None, None);
let variant = adt_def.variant(variant_index);
let statement = Statement {
kind: StatementKind::Assign(Box::new((
@@ -941,7 +948,7 @@ pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> {
fn build_fn_ptr_addr_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Body<'tcx> {
assert!(matches!(self_ty.kind(), ty::FnPtr(..)), "expected fn ptr, found {self_ty}");
let span = tcx.def_span(def_id);
- let Some(sig) = tcx.fn_sig(def_id).subst(tcx, &[self_ty.into()]).no_bound_vars() else {
+ let Some(sig) = tcx.fn_sig(def_id).instantiate(tcx, &[self_ty.into()]).no_bound_vars() else {
span_bug!(span, "FnPtr::addr with bound vars for `{self_ty}`");
};
let locals = local_decls_for_sig(&sig, span);
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
index e59219321..b7a51cfd6 100644
--- a/compiler/rustc_mir_transform/src/simplify.rs
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -199,7 +199,8 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
let last = current;
*start = last;
while let Some((current, mut terminator)) = terminators.pop() {
- let Terminator { kind: TerminatorKind::Goto { ref mut target }, .. } = terminator else {
+ let Terminator { kind: TerminatorKind::Goto { ref mut target }, .. } = terminator
+ else {
unreachable!();
};
*changed |= *target != last;
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index 94e1da8e1..e66ae8ff8 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -12,7 +12,7 @@ pub struct ScalarReplacementOfAggregates;
impl<'tcx> MirPass<'tcx> for ScalarReplacementOfAggregates {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
- sess.mir_opt_level() >= 3
+ sess.mir_opt_level() >= 2
}
#[instrument(level = "debug", skip(self, tcx, body))]
@@ -20,7 +20,7 @@ impl<'tcx> MirPass<'tcx> for ScalarReplacementOfAggregates {
debug!(def_id = ?body.source.def_id());
// Avoid query cycles (generators require optimized MIR for layout).
- if tcx.type_of(body.source.def_id()).subst_identity().is_generator() {
+ if tcx.type_of(body.source.def_id()).instantiate_identity().is_generator() {
return;
}
@@ -64,7 +64,7 @@ fn escaping_locals<'tcx>(
if ty.is_union() || ty.is_enum() {
return true;
}
- if let ty::Adt(def, _substs) = ty.kind() {
+ if let ty::Adt(def, _args) = ty.kind() {
if def.repr().flags.contains(ReprFlags::IS_SIMD) {
// Exclude #[repr(simd)] types so that they are not de-optimized into an array
return true;
@@ -161,7 +161,9 @@ struct ReplacementMap<'tcx> {
impl<'tcx> ReplacementMap<'tcx> {
fn replace_place(&self, tcx: TyCtxt<'tcx>, place: PlaceRef<'tcx>) -> Option<Place<'tcx>> {
- let &[PlaceElem::Field(f, _), ref rest @ ..] = place.projection else { return None; };
+ let &[PlaceElem::Field(f, _), ref rest @ ..] = place.projection else {
+ return None;
+ };
let fields = self.fragments[place.local].as_ref()?;
let (_, new_local) = fields[f]?;
Some(Place { local: new_local, projection: tcx.mk_place_elems(&rest) })
diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs
index 8dc2dfe13..04bc461c8 100644
--- a/compiler/rustc_mir_transform/src/ssa.rs
+++ b/compiler/rustc_mir_transform/src/ssa.rs
@@ -266,9 +266,11 @@ fn compute_copy_classes(ssa: &mut SsaLocals, body: &Body<'_>) {
let mut copies = IndexVec::from_fn_n(|l| l, body.local_decls.len());
for (local, rvalue, _) in ssa.assignments(body) {
- let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place)) | Rvalue::CopyForDeref(place))
- = rvalue
- else { continue };
+ let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place))
+ | Rvalue::CopyForDeref(place)) = rvalue
+ else {
+ continue;
+ };
let Some(rhs) = place.as_local() else { continue };
let local_ty = body.local_decls()[local].ty;
diff --git a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
index 5389b9f52..092bcb5c9 100644
--- a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
+++ b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
@@ -105,7 +105,8 @@ impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching {
for bb in body.basic_blocks.indices() {
trace!("processing block {:?}", bb);
- let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks[bb], tcx, body) else {
+ let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks[bb], tcx, body)
+ else {
continue;
};
diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs
index 242269e9d..55b14ce1c 100644
--- a/compiler/rustc_monomorphize/src/collector.rs
+++ b/compiler/rustc_monomorphize/src/collector.rs
@@ -178,11 +178,11 @@ use rustc_middle::mir::{self, Local, Location};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::adjustment::{CustomCoerceUnsized, PointerCoercion};
use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
use rustc_middle::ty::{
self, GenericParamDefKind, Instance, InstanceDef, Ty, TyCtxt, TypeFoldable, TypeVisitableExt,
VtblEntry,
};
+use rustc_middle::ty::{GenericArgKind, GenericArgs};
use rustc_middle::{middle::codegen_fn_attrs::CodegenFnAttrFlags, mir::visit::TyContext};
use rustc_session::config::EntryFnType;
use rustc_session::lint::builtin::LARGE_ASSIGNMENTS;
@@ -384,7 +384,7 @@ fn collect_items_rec<'tcx>(
if let Ok(alloc) = tcx.eval_static_initializer(def_id) {
for &id in alloc.inner().provenance().ptrs().values() {
- collect_miri(tcx, id, &mut used_items);
+ collect_alloc(tcx, id, &mut used_items);
}
}
@@ -393,7 +393,7 @@ fn collect_items_rec<'tcx>(
starting_item.span,
MonoItem::Fn(Instance {
def: InstanceDef::ThreadLocalShim(def_id),
- substs: InternalSubsts::empty(),
+ args: GenericArgs::empty(),
}),
));
}
@@ -555,7 +555,7 @@ fn check_recursion_limit<'tcx>(
fn check_type_length_limit<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) {
let type_length = instance
- .substs
+ .args
.iter()
.flat_map(|arg| arg.walk())
.filter(|arg| match arg.unpack() {
@@ -659,11 +659,11 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
let source_ty = operand.ty(self.body, self.tcx);
let source_ty = self.monomorphize(source_ty);
match *source_ty.kind() {
- ty::Closure(def_id, substs) => {
+ ty::Closure(def_id, args) => {
let instance = Instance::resolve_closure(
self.tcx,
def_id,
- substs,
+ args,
ty::ClosureKind::FnOnce,
)
.expect("failed to normalize and resolve closure during codegen");
@@ -875,12 +875,11 @@ fn visit_fn_use<'tcx>(
source: Span,
output: &mut MonoItems<'tcx>,
) {
- if let ty::FnDef(def_id, substs) = *ty.kind() {
+ if let ty::FnDef(def_id, args) = *ty.kind() {
let instance = if is_direct_call {
- ty::Instance::expect_resolve(tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ ty::Instance::expect_resolve(tcx, ty::ParamEnv::reveal_all(), def_id, args)
} else {
- match ty::Instance::resolve_for_fn_ptr(tcx, ty::ParamEnv::reveal_all(), def_id, substs)
- {
+ match ty::Instance::resolve_for_fn_ptr(tcx, ty::ParamEnv::reveal_all(), def_id, args) {
Some(instance) => instance,
_ => bug!("failed to resolve instance for {ty}"),
}
@@ -1043,7 +1042,7 @@ fn find_vtable_types_for_unsizing<'tcx>(
// T as dyn* Trait
(_, &ty::Dynamic(_, _, ty::DynStar)) => ptr_vtable(source_ty, target_ty),
- (&ty::Adt(source_adt_def, source_substs), &ty::Adt(target_adt_def, target_substs)) => {
+ (&ty::Adt(source_adt_def, source_args), &ty::Adt(target_adt_def, target_args)) => {
assert_eq!(source_adt_def, target_adt_def);
let CustomCoerceUnsized::Struct(coerce_index) =
@@ -1059,8 +1058,8 @@ fn find_vtable_types_for_unsizing<'tcx>(
find_vtable_types_for_unsizing(
tcx,
- source_fields[coerce_index].ty(*tcx, source_substs),
- target_fields[coerce_index].ty(*tcx, target_substs),
+ source_fields[coerce_index].ty(*tcx, source_args),
+ target_fields[coerce_index].ty(*tcx, target_args),
)
}
_ => bug!(
@@ -1245,7 +1244,7 @@ impl<'v> RootCollector<'_, 'v> {
self.tcx,
ty::ParamEnv::reveal_all(),
start_def_id,
- self.tcx.mk_substs(&[main_ret_ty.into()]),
+ self.tcx.mk_args(&[main_ret_ty.into()]),
)
.unwrap()
.unwrap();
@@ -1292,8 +1291,8 @@ fn create_mono_items_for_default_impls<'tcx>(
)
}
};
- let impl_substs = InternalSubsts::for_item(tcx, item.owner_id.to_def_id(), only_region_params);
- let trait_ref = trait_ref.subst(tcx, impl_substs);
+ let impl_args = GenericArgs::for_item(tcx, item.owner_id.to_def_id(), only_region_params);
+ let trait_ref = trait_ref.instantiate(tcx, impl_args);
// Unlike 'lazy' monomorphization that begins by collecting items transitively
// called by `main` or other global items, when eagerly monomorphizing impl
@@ -1304,7 +1303,7 @@ fn create_mono_items_for_default_impls<'tcx>(
// consider higher-ranked predicates such as `for<'a> &'a mut [u8]: Copy` to
// be trivially false. We must now check that the impl has no impossible-to-satisfy
// predicates.
- if tcx.subst_and_check_impossible_predicates((item.owner_id.to_def_id(), impl_substs)) {
+ if tcx.subst_and_check_impossible_predicates((item.owner_id.to_def_id(), impl_args)) {
return;
}
@@ -1322,8 +1321,8 @@ fn create_mono_items_for_default_impls<'tcx>(
// As mentioned above, the method is legal to eagerly instantiate if it
// only has lifetime substitutions. This is validated by
- let substs = trait_ref.substs.extend_to(tcx, method.def_id, only_region_params);
- let instance = ty::Instance::expect_resolve(tcx, param_env, method.def_id, substs);
+ let args = trait_ref.args.extend_to(tcx, method.def_id, only_region_params);
+ let instance = ty::Instance::expect_resolve(tcx, param_env, method.def_id, args);
let mono_item = create_fn_mono_item(tcx, instance, DUMMY_SP);
if mono_item.node.is_instantiable(tcx) && should_codegen_locally(tcx, &instance) {
@@ -1332,8 +1331,8 @@ fn create_mono_items_for_default_impls<'tcx>(
}
}
-/// Scans the miri alloc in order to find function calls, closures, and drop-glue.
-fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoItems<'tcx>) {
+/// Scans the CTFE alloc in order to find function calls, closures, and drop-glue.
+fn collect_alloc<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoItems<'tcx>) {
match tcx.global_alloc(alloc_id) {
GlobalAlloc::Static(def_id) => {
assert!(!tcx.is_thread_local_static(def_id));
@@ -1347,7 +1346,7 @@ fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoIte
trace!("collecting {:?} with {:#?}", alloc_id, alloc);
for &inner in alloc.inner().provenance().ptrs().values() {
rustc_data_structures::stack::ensure_sufficient_stack(|| {
- collect_miri(tcx, inner, output);
+ collect_alloc(tcx, inner, output);
});
}
}
@@ -1359,7 +1358,7 @@ fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoIte
}
GlobalAlloc::VTable(ty, trait_ref) => {
let alloc_id = tcx.vtable_allocation((ty, trait_ref));
- collect_miri(tcx, alloc_id, output)
+ collect_alloc(tcx, alloc_id, output)
}
}
}
@@ -1382,10 +1381,10 @@ fn collect_const_value<'tcx>(
output: &mut MonoItems<'tcx>,
) {
match value {
- ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_miri(tcx, ptr.provenance, output),
+ ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_alloc(tcx, ptr.provenance, output),
ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
for &id in alloc.inner().provenance().ptrs().values() {
- collect_miri(tcx, id, output);
+ collect_alloc(tcx, id, output);
}
}
_ => {}
diff --git a/compiler/rustc_monomorphize/src/partitioning.rs b/compiler/rustc_monomorphize/src/partitioning.rs
index da76cf223..de6db8ae6 100644
--- a/compiler/rustc_monomorphize/src/partitioning.rs
+++ b/compiler/rustc_monomorphize/src/partitioning.rs
@@ -107,7 +107,8 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
use rustc_middle::mir;
use rustc_middle::mir::mono::{
- CodegenUnit, CodegenUnitNameBuilder, InstantiationMode, Linkage, MonoItem, Visibility,
+ CodegenUnit, CodegenUnitNameBuilder, InstantiationMode, Linkage, MonoItem, MonoItemData,
+ Visibility,
};
use rustc_middle::query::Providers;
use rustc_middle::ty::print::{characteristic_def_id_of_type, with_no_trimmed_paths};
@@ -130,11 +131,6 @@ struct PlacedMonoItems<'tcx> {
codegen_units: Vec<CodegenUnit<'tcx>>,
internalization_candidates: FxHashSet<MonoItem<'tcx>>,
-
- /// These must be obtained when the iterator in `partition` runs. They
- /// can't be obtained later because some inlined functions might not be
- /// reachable.
- unique_inlined_stats: (usize, usize),
}
// The output CGUs are sorted by name.
@@ -152,11 +148,11 @@ where
// Place all mono items into a codegen unit. `place_mono_items` is
// responsible for initializing the CGU size estimates.
- let PlacedMonoItems { mut codegen_units, internalization_candidates, unique_inlined_stats } = {
+ let PlacedMonoItems { mut codegen_units, internalization_candidates } = {
let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_items");
let placed = place_mono_items(cx, mono_items);
- debug_dump(tcx, "PLACE", &placed.codegen_units, placed.unique_inlined_stats);
+ debug_dump(tcx, "PLACE", &placed.codegen_units);
placed
};
@@ -167,7 +163,7 @@ where
{
let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_merge_cgus");
merge_codegen_units(cx, &mut codegen_units);
- debug_dump(tcx, "MERGE", &codegen_units, unique_inlined_stats);
+ debug_dump(tcx, "MERGE", &codegen_units);
}
// Make as many symbols "internal" as possible, so LLVM has more freedom to
@@ -176,7 +172,7 @@ where
let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_internalize_symbols");
internalize_symbols(cx, &mut codegen_units, internalization_candidates);
- debug_dump(tcx, "INTERNALIZE", &codegen_units, unique_inlined_stats);
+ debug_dump(tcx, "INTERNALIZE", &codegen_units);
}
// Mark one CGU for dead code, if necessary.
@@ -216,18 +212,12 @@ where
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx);
let cgu_name_cache = &mut FxHashMap::default();
- let mut num_unique_inlined_items = 0;
- let mut unique_inlined_items_size = 0;
for mono_item in mono_items {
// Handle only root items directly here. Inlined items are handled at
// the bottom of the loop based on reachability.
match mono_item.instantiation_mode(cx.tcx) {
InstantiationMode::GloballyShared { .. } => {}
- InstantiationMode::LocalCopy => {
- num_unique_inlined_items += 1;
- unique_inlined_items_size += mono_item.size_estimate(cx.tcx);
- continue;
- }
+ InstantiationMode::LocalCopy => continue,
}
let characteristic_def_id = characteristic_def_id_of_mono_item(cx.tcx, mono_item);
@@ -256,8 +246,10 @@ where
if visibility == Visibility::Hidden && can_be_internalized {
internalization_candidates.insert(mono_item);
}
+ let size_estimate = mono_item.size_estimate(cx.tcx);
- cgu.items_mut().insert(mono_item, (linkage, visibility));
+ cgu.items_mut()
+ .insert(mono_item, MonoItemData { inlined: false, linkage, visibility, size_estimate });
// Get all inlined items that are reachable from `mono_item` without
// going via another root item. This includes drop-glue, functions from
@@ -271,7 +263,12 @@ where
// the `insert` will be a no-op.
for inlined_item in reachable_inlined_items {
// This is a CGU-private copy.
- cgu.items_mut().insert(inlined_item, (Linkage::Internal, Visibility::Default));
+ cgu.items_mut().entry(inlined_item).or_insert_with(|| MonoItemData {
+ inlined: true,
+ linkage: Linkage::Internal,
+ visibility: Visibility::Default,
+ size_estimate: inlined_item.size_estimate(cx.tcx),
+ });
}
}
@@ -286,14 +283,10 @@ where
codegen_units.sort_by(|a, b| a.name().as_str().cmp(b.name().as_str()));
for cgu in codegen_units.iter_mut() {
- cgu.compute_size_estimate(cx.tcx);
+ cgu.compute_size_estimate();
}
- return PlacedMonoItems {
- codegen_units,
- internalization_candidates,
- unique_inlined_stats: (num_unique_inlined_items, unique_inlined_items_size),
- };
+ return PlacedMonoItems { codegen_units, internalization_candidates };
fn get_reachable_inlined_items<'tcx>(
tcx: TyCtxt<'tcx>,
@@ -325,6 +318,60 @@ fn merge_codegen_units<'tcx>(
let mut cgu_contents: FxHashMap<Symbol, Vec<Symbol>> =
codegen_units.iter().map(|cgu| (cgu.name(), vec![cgu.name()])).collect();
+ // If N is the maximum number of CGUs, and the CGUs are sorted from largest
+ // to smallest, we repeatedly find which CGU in codegen_units[N..] has the
+ // greatest overlap of inlined items with codegen_units[N-1], merge that
+ // CGU into codegen_units[N-1], then re-sort by size and repeat.
+ //
+ // We use inlined item overlap to guide this merging because it minimizes
+ // duplication of inlined items, which makes LLVM be faster and generate
+ // better and smaller machine code.
+ //
+ // Why merge into codegen_units[N-1]? We want CGUs to have similar sizes,
+ // which means we don't want codegen_units[0..N] (the already big ones)
+ // getting any bigger, if we can avoid it. When we have more than N CGUs
+ // then at least one of the biggest N will have to grow. codegen_units[N-1]
+ // is the smallest of those, and so has the most room to grow.
+ let max_codegen_units = cx.tcx.sess.codegen_units().as_usize();
+ while codegen_units.len() > max_codegen_units {
+ // Sort small CGUs to the back.
+ codegen_units.sort_by_key(|cgu| cmp::Reverse(cgu.size_estimate()));
+
+ let cgu_dst = &codegen_units[max_codegen_units - 1];
+
+ // Find the CGU that overlaps the most with `cgu_dst`. In the case of a
+ // tie, favour the earlier (bigger) CGU.
+ let mut max_overlap = 0;
+ let mut max_overlap_i = max_codegen_units;
+ for (i, cgu_src) in codegen_units.iter().enumerate().skip(max_codegen_units) {
+ if cgu_src.size_estimate() <= max_overlap {
+ // None of the remaining overlaps can exceed `max_overlap`, so
+ // stop looking.
+ break;
+ }
+
+ let overlap = compute_inlined_overlap(cgu_dst, cgu_src);
+ if overlap > max_overlap {
+ max_overlap = overlap;
+ max_overlap_i = i;
+ }
+ }
+
+ let mut cgu_src = codegen_units.swap_remove(max_overlap_i);
+ let cgu_dst = &mut codegen_units[max_codegen_units - 1];
+
+ // Move the items from `cgu_src` to `cgu_dst`. Some of them may be
+ // duplicate inlined items, in which case the destination CGU is
+ // unaffected. Recalculate size estimates afterwards.
+ cgu_dst.items_mut().extend(cgu_src.items_mut().drain());
+ cgu_dst.compute_size_estimate();
+
+ // Record that `cgu_dst` now contains all the stuff that was in
+ // `cgu_src` before.
+ let mut consumed_cgu_names = cgu_contents.remove(&cgu_src.name()).unwrap();
+ cgu_contents.get_mut(&cgu_dst.name()).unwrap().append(&mut consumed_cgu_names);
+ }
+
// Having multiple CGUs can drastically speed up compilation. But for
// non-incremental builds, tiny CGUs slow down compilation *and* result in
// worse generated code. So we don't allow CGUs smaller than this (unless
@@ -332,24 +379,22 @@ fn merge_codegen_units<'tcx>(
// common in larger programs, so this isn't all that large.
const NON_INCR_MIN_CGU_SIZE: usize = 1800;
- // Repeatedly merge the two smallest codegen units as long as:
- // - we have more CGUs than the upper limit, or
- // - (Non-incremental builds only) the user didn't specify a CGU count, and
- // there are multiple CGUs, and some are below the minimum size.
+ // Repeatedly merge the two smallest codegen units as long as: it's a
+ // non-incremental build, and the user didn't specify a CGU count, and
+ // there are multiple CGUs, and some are below the minimum size.
//
// The "didn't specify a CGU count" condition is because when an explicit
// count is requested we observe it as closely as possible. For example,
// the `compiler_builtins` crate sets `codegen-units = 10000` and it's
// critical they aren't merged. Also, some tests use explicit small values
// and likewise won't work if small CGUs are merged.
- while codegen_units.len() > cx.tcx.sess.codegen_units().as_usize()
- || (cx.tcx.sess.opts.incremental.is_none()
- && matches!(cx.tcx.sess.codegen_units(), CodegenUnits::Default(_))
- && codegen_units.len() > 1
- && codegen_units.iter().any(|cgu| cgu.size_estimate() < NON_INCR_MIN_CGU_SIZE))
+ while cx.tcx.sess.opts.incremental.is_none()
+ && matches!(cx.tcx.sess.codegen_units(), CodegenUnits::Default(_))
+ && codegen_units.len() > 1
+ && codegen_units.iter().any(|cgu| cgu.size_estimate() < NON_INCR_MIN_CGU_SIZE)
{
// Sort small cgus to the back.
- codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
+ codegen_units.sort_by_key(|cgu| cmp::Reverse(cgu.size_estimate()));
let mut smallest = codegen_units.pop().unwrap();
let second_smallest = codegen_units.last_mut().unwrap();
@@ -358,18 +403,9 @@ fn merge_codegen_units<'tcx>(
// may be duplicate inlined items, in which case the destination CGU is
// unaffected. Recalculate size estimates afterwards.
second_smallest.items_mut().extend(smallest.items_mut().drain());
- second_smallest.compute_size_estimate(cx.tcx);
-
- // Record that `second_smallest` now contains all the stuff that was
- // in `smallest` before.
- let mut consumed_cgu_names = cgu_contents.remove(&smallest.name()).unwrap();
- cgu_contents.get_mut(&second_smallest.name()).unwrap().append(&mut consumed_cgu_names);
+ second_smallest.compute_size_estimate();
- debug!(
- "CodegenUnit {} merged into CodegenUnit {}",
- smallest.name(),
- second_smallest.name()
- );
+ // Don't update `cgu_contents`, that's only for incremental builds.
}
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx);
@@ -448,6 +484,25 @@ fn merge_codegen_units<'tcx>(
}
}
+/// Compute the combined size of all inlined items that appear in both `cgu1`
+/// and `cgu2`.
+fn compute_inlined_overlap<'tcx>(cgu1: &CodegenUnit<'tcx>, cgu2: &CodegenUnit<'tcx>) -> usize {
+ // Either order works. We pick the one that involves iterating over fewer
+ // items.
+ let (src_cgu, dst_cgu) =
+ if cgu1.items().len() <= cgu2.items().len() { (cgu1, cgu2) } else { (cgu2, cgu1) };
+
+ let mut overlap = 0;
+ for (item, data) in src_cgu.items().iter() {
+ if data.inlined {
+ if dst_cgu.items().contains_key(item) {
+ overlap += data.size_estimate;
+ }
+ }
+ }
+ overlap
+}
+
fn internalize_symbols<'tcx>(
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut [CodegenUnit<'tcx>],
@@ -492,7 +547,7 @@ fn internalize_symbols<'tcx>(
for cgu in codegen_units {
let home_cgu = MonoItemPlacement::SingleCgu(cgu.name());
- for (item, linkage_and_visibility) in cgu.items_mut() {
+ for (item, data) in cgu.items_mut() {
if !internalization_candidates.contains(item) {
// This item is no candidate for internalizing, so skip it.
continue;
@@ -520,7 +575,8 @@ fn internalize_symbols<'tcx>(
// If we got here, we did not find any uses from other CGUs, so
// it's fine to make this monomorphization internal.
- *linkage_and_visibility = (Linkage::Internal, Visibility::Default);
+ data.linkage = Linkage::Internal;
+ data.visibility = Visibility::Default;
}
}
}
@@ -537,7 +593,7 @@ fn mark_code_coverage_dead_code_cgu<'tcx>(codegen_units: &mut [CodegenUnit<'tcx>
// function symbols to be included via `-u` or `/include` linker args.
let dead_code_cgu = codegen_units
.iter_mut()
- .filter(|cgu| cgu.items().iter().any(|(_, (linkage, _))| *linkage == Linkage::External))
+ .filter(|cgu| cgu.items().iter().any(|(_, data)| data.linkage == Linkage::External))
.min_by_key(|cgu| cgu.size_estimate());
// If there are no CGUs that have externally linked items, then we just
@@ -572,7 +628,7 @@ fn characteristic_def_id_of_mono_item<'tcx>(
// DefId, we use the location of the impl after all.
if tcx.trait_of_item(def_id).is_some() {
- let self_ty = instance.substs.type_at(0);
+ let self_ty = instance.args.type_at(0);
// This is a default implementation of a trait method.
return characteristic_def_id_of_type(self_ty).or(Some(def_id));
}
@@ -592,7 +648,7 @@ fn characteristic_def_id_of_mono_item<'tcx>(
if !tcx.sess.opts.unstable_opts.polymorphize || !instance.has_param() {
// This is a method within an impl, find out what the self-type is:
let impl_self_ty = tcx.subst_and_normalize_erasing_regions(
- instance.substs,
+ instance.args,
ty::ParamEnv::reveal_all(),
tcx.type_of(impl_def_id),
);
@@ -745,7 +801,7 @@ fn mono_item_visibility<'tcx>(
return Visibility::Hidden;
}
- let is_generic = instance.substs.non_erasable_generics().next().is_some();
+ let is_generic = instance.args.non_erasable_generics().next().is_some();
// Upstream `DefId` instances get different handling than local ones.
let Some(def_id) = def_id.as_local() else {
@@ -851,12 +907,7 @@ fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibilit
}
}
-fn debug_dump<'a, 'tcx: 'a>(
- tcx: TyCtxt<'tcx>,
- label: &str,
- cgus: &[CodegenUnit<'tcx>],
- (unique_inlined_items, unique_inlined_size): (usize, usize),
-) {
+fn debug_dump<'a, 'tcx: 'a>(tcx: TyCtxt<'tcx>, label: &str, cgus: &[CodegenUnit<'tcx>]) {
let dump = move || {
use std::fmt::Write;
@@ -865,29 +916,34 @@ fn debug_dump<'a, 'tcx: 'a>(
// Note: every unique root item is placed exactly once, so the number
// of unique root items always equals the number of placed root items.
+ //
+ // Also, unreached inlined items won't be counted here. This is fine.
+
+ let mut inlined_items = FxHashSet::default();
let mut root_items = 0;
- // unique_inlined_items is passed in above.
+ let mut unique_inlined_items = 0;
let mut placed_inlined_items = 0;
let mut root_size = 0;
- // unique_inlined_size is passed in above.
+ let mut unique_inlined_size = 0;
let mut placed_inlined_size = 0;
for cgu in cgus.iter() {
num_cgus += 1;
all_cgu_sizes.push(cgu.size_estimate());
- for (item, _) in cgu.items() {
- match item.instantiation_mode(tcx) {
- InstantiationMode::GloballyShared { .. } => {
- root_items += 1;
- root_size += item.size_estimate(tcx);
- }
- InstantiationMode::LocalCopy => {
- placed_inlined_items += 1;
- placed_inlined_size += item.size_estimate(tcx);
+ for (item, data) in cgu.items() {
+ if !data.inlined {
+ root_items += 1;
+ root_size += data.size_estimate;
+ } else {
+ if inlined_items.insert(item) {
+ unique_inlined_items += 1;
+ unique_inlined_size += data.size_estimate;
}
+ placed_inlined_items += 1;
+ placed_inlined_size += data.size_estimate;
}
}
}
@@ -928,7 +984,7 @@ fn debug_dump<'a, 'tcx: 'a>(
let mean_size = size as f64 / num_items as f64;
let mut placed_item_sizes: Vec<_> =
- cgu.items().iter().map(|(item, _)| item.size_estimate(tcx)).collect();
+ cgu.items().values().map(|data| data.size_estimate).collect();
placed_item_sizes.sort_unstable_by_key(|&n| cmp::Reverse(n));
let sizes = list(&placed_item_sizes);
@@ -937,15 +993,13 @@ fn debug_dump<'a, 'tcx: 'a>(
let _ =
writeln!(s, " - items: {num_items}, mean size: {mean_size:.1}, sizes: {sizes}",);
- for (item, linkage) in cgu.items_in_deterministic_order(tcx) {
+ for (item, data) in cgu.items_in_deterministic_order(tcx) {
+ let linkage = data.linkage;
let symbol_name = item.symbol_name(tcx).name;
let symbol_hash_start = symbol_name.rfind('h');
let symbol_hash = symbol_hash_start.map_or("<no hash>", |i| &symbol_name[i..]);
- let size = item.size_estimate(tcx);
- let kind = match item.instantiation_mode(tcx) {
- InstantiationMode::GloballyShared { .. } => "root",
- InstantiationMode::LocalCopy => "inlined",
- };
+ let kind = if !data.inlined { "root" } else { "inlined" };
+ let size = data.size_estimate;
let _ = with_no_trimmed_paths!(writeln!(
s,
" - {item} [{linkage:?}] [{symbol_hash}] ({kind}, size: {size})"
@@ -987,10 +1041,7 @@ fn debug_dump<'a, 'tcx: 'a>(
}
elem(curr, curr_count);
- let mut s = "[".to_string();
- s.push_str(&v.join(", "));
- s.push_str("]");
- s
+ format!("[{}]", v.join(", "))
}
};
@@ -1100,8 +1151,8 @@ fn collect_and_partition_mono_items(tcx: TyCtxt<'_>, (): ()) -> (&DefIdSet, &[Co
let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default();
for cgu in codegen_units {
- for (&mono_item, &linkage) in cgu.items() {
- item_to_cgus.entry(mono_item).or_default().push((cgu.name(), linkage));
+ for (&mono_item, &data) in cgu.items() {
+ item_to_cgus.entry(mono_item).or_default().push((cgu.name(), data.linkage));
}
}
@@ -1114,7 +1165,7 @@ fn collect_and_partition_mono_items(tcx: TyCtxt<'_>, (): ()) -> (&DefIdSet, &[Co
let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
cgus.sort_by_key(|(name, _)| *name);
cgus.dedup();
- for &(ref cgu_name, (linkage, _)) in cgus.iter() {
+ for &(ref cgu_name, linkage) in cgus.iter() {
output.push(' ');
output.push_str(cgu_name.as_str());
@@ -1175,12 +1226,13 @@ fn dump_mono_items_stats<'tcx>(
// Gather instantiated mono items grouped by def_id
let mut items_per_def_id: FxHashMap<_, Vec<_>> = Default::default();
for cgu in codegen_units {
- for (&mono_item, _) in cgu.items() {
+ cgu.items()
+ .keys()
// Avoid variable-sized compiler-generated shims
- if mono_item.is_user_defined() {
+ .filter(|mono_item| mono_item.is_user_defined())
+ .for_each(|mono_item| {
items_per_def_id.entry(mono_item.def_id()).or_default().push(mono_item);
- }
- }
+ });
}
#[derive(serde::Serialize)]
@@ -1233,7 +1285,7 @@ fn codegened_and_inlined_items(tcx: TyCtxt<'_>, (): ()) -> &DefIdSet {
let mut result = items.clone();
for cgu in cgus {
- for (item, _) in cgu.items() {
+ for item in cgu.items().keys() {
if let MonoItem::Fn(ref instance) = item {
let did = instance.def_id();
if !visited.insert(did) {
diff --git a/compiler/rustc_monomorphize/src/polymorphize.rs b/compiler/rustc_monomorphize/src/polymorphize.rs
index 88a3e0285..a8b7a0dbb 100644
--- a/compiler/rustc_monomorphize/src/polymorphize.rs
+++ b/compiler/rustc_monomorphize/src/polymorphize.rs
@@ -14,9 +14,8 @@ use rustc_middle::mir::{
use rustc_middle::query::Providers;
use rustc_middle::ty::{
self,
- subst::SubstsRef,
visit::{TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor},
- Const, Ty, TyCtxt, UnusedGenericParams,
+ Const, GenericArgsRef, Ty, TyCtxt, UnusedGenericParams,
};
use rustc_span::symbol::sym;
use std::ops::ControlFlow;
@@ -144,7 +143,7 @@ fn mark_used_by_default_parameters<'tcx>(
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -163,7 +162,6 @@ fn mark_used_by_default_parameters<'tcx>(
| DefKind::AnonConst
| DefKind::InlineConst
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::Field
| DefKind::LifetimeParam
| DefKind::GlobalAsm
@@ -230,12 +228,12 @@ struct MarkUsedGenericParams<'a, 'tcx> {
impl<'a, 'tcx> MarkUsedGenericParams<'a, 'tcx> {
/// Invoke `unused_generic_params` on a body contained within the current item (e.g.
/// a closure, generator or constant).
- #[instrument(level = "debug", skip(self, def_id, substs))]
- fn visit_child_body(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) {
+ #[instrument(level = "debug", skip(self, def_id, args))]
+ fn visit_child_body(&mut self, def_id: DefId, args: GenericArgsRef<'tcx>) {
let instance = ty::InstanceDef::Item(def_id);
let unused = self.tcx.unused_generic_params(instance);
debug!(?self.unused_parameters, ?unused);
- for (i, arg) in substs.iter().enumerate() {
+ for (i, arg) in args.iter().enumerate() {
let i = i.try_into().unwrap();
if unused.is_used(i) {
arg.visit_with(self);
@@ -253,9 +251,9 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
if matches!(def_kind, DefKind::Closure | DefKind::Generator) {
// Skip visiting the closure/generator that is currently being processed. This only
// happens because the first argument to the closure is a reference to itself and
- // that will call `visit_substs`, resulting in each generic parameter captured being
+ // that will call `visit_args`, resulting in each generic parameter captured being
// considered used by default.
- debug!("skipping closure substs");
+ debug!("skipping closure args");
return;
}
}
@@ -268,12 +266,12 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
ConstantKind::Ty(c) => {
c.visit_with(self);
}
- ConstantKind::Unevaluated(mir::UnevaluatedConst { def, substs: _, promoted }, ty) => {
+ ConstantKind::Unevaluated(mir::UnevaluatedConst { def, args: _, promoted }, ty) => {
// Avoid considering `T` unused when constants are of the form:
// `<Self as Foo<T>>::foo::promoted[p]`
if let Some(p) = promoted {
if self.def_id == def && !self.tcx.generics_of(def).has_self {
- // If there is a promoted, don't look at the substs - since it will always contain
+ // If there is a promoted, don't look at the args - since it will always contain
// the generic parameters, instead, traverse the promoted MIR.
let promoted = self.tcx.promoted_mir(def);
self.visit_body(&promoted[p]);
@@ -304,10 +302,10 @@ impl<'a, 'tcx> TypeVisitor<TyCtxt<'tcx>> for MarkUsedGenericParams<'a, 'tcx> {
self.unused_parameters.mark_used(param.index);
ControlFlow::Continue(())
}
- ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs })
+ ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, args })
if matches!(self.tcx.def_kind(def), DefKind::AnonConst) =>
{
- self.visit_child_body(def, substs);
+ self.visit_child_body(def, args);
ControlFlow::Continue(())
}
_ => c.super_visit_with(self),
@@ -321,7 +319,7 @@ impl<'a, 'tcx> TypeVisitor<TyCtxt<'tcx>> for MarkUsedGenericParams<'a, 'tcx> {
}
match *ty.kind() {
- ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => {
+ ty::Closure(def_id, args) | ty::Generator(def_id, args, ..) => {
debug!(?def_id);
// Avoid cycle errors with generators.
if def_id == self.def_id {
@@ -330,7 +328,7 @@ impl<'a, 'tcx> TypeVisitor<TyCtxt<'tcx>> for MarkUsedGenericParams<'a, 'tcx> {
// Consider any generic parameters used by any closures/generators as used in the
// parent.
- self.visit_child_body(def_id, substs);
+ self.visit_child_body(def_id, args);
ControlFlow::Continue(())
}
ty::Param(param) => {
diff --git a/compiler/rustc_monomorphize/src/util.rs b/compiler/rustc_monomorphize/src/util.rs
index f6a80b043..a3433d3d1 100644
--- a/compiler/rustc_monomorphize/src/util.rs
+++ b/compiler/rustc_monomorphize/src/util.rs
@@ -27,12 +27,12 @@ pub(crate) fn dump_closure_profile<'tcx>(tcx: TyCtxt<'tcx>, closure_instance: In
typeck_results.closure_size_eval[&closure_def_id];
let before_feature_tys = tcx.subst_and_normalize_erasing_regions(
- closure_instance.substs,
+ closure_instance.args,
param_env,
ty::EarlyBinder::bind(before_feature_tys),
);
let after_feature_tys = tcx.subst_and_normalize_erasing_regions(
- closure_instance.substs,
+ closure_instance.args,
param_env,
ty::EarlyBinder::bind(after_feature_tys),
);
diff --git a/compiler/rustc_parse/messages.ftl b/compiler/rustc_parse/messages.ftl
index 9787d98c1..34cc0998c 100644
--- a/compiler/rustc_parse/messages.ftl
+++ b/compiler/rustc_parse/messages.ftl
@@ -23,6 +23,8 @@ parse_async_block_in_2015 = `async` blocks are only allowed in Rust 2018 or late
parse_async_fn_in_2015 = `async fn` is not permitted in Rust 2015
.label = to use `async fn`, switch to Rust 2018 or later
+parse_async_move_block_in_2015 = `async move` blocks are only allowed in Rust 2018 or later
+
parse_async_move_order_incorrect = the order of `move` and `async` is incorrect
.suggestion = try switching the order
@@ -270,6 +272,8 @@ parse_found_expr_would_be_stmt = expected expression, found `{$token}`
parse_function_body_equals_expr = function body cannot be `= expression;`
.suggestion = surround the expression with `{"{"}` and `{"}"}` instead of `=` and `;`
+parse_generic_args_in_pat_require_turbofish_syntax = generic args in patterns require the turbofish syntax
+
parse_generic_parameters_without_angle_brackets = generic parameters without surrounding angle brackets
.suggestion = surround the type parameters with angle brackets
@@ -306,8 +310,8 @@ parse_inclusive_range_no_end = inclusive range with no end
.suggestion_open_range = use `..` instead
.note = inclusive ranges must be bounded at the end (`..=b` or `a..=b`)
-parse_incorrect_braces_trait_bounds = incorrect braces around trait bounds
- .suggestion = remove the parentheses
+parse_incorrect_parens_trait_bounds = incorrect parentheses around trait bounds
+parse_incorrect_parens_trait_bounds_sugg = fix the parentheses
parse_incorrect_semicolon =
expected item, found `;`
@@ -457,6 +461,12 @@ parse_loop_else = `{$loop_kind}...else` loops are not supported
.note = consider moving this `else` clause to a separate `if` statement and use a `bool` variable to control if it should run
.loop_keyword = `else` is attached to this loop
+parse_macro_expands_to_adt_field = macros cannot expand to {$adt_ty} fields
+
+parse_macro_expands_to_enum_variant = macros cannot expand to enum variants
+
+parse_macro_expands_to_match_arm = macros cannot expand to match arms
+
parse_macro_invocation_visibility = can't qualify macro invocation with `pub`
.suggestion = remove the visibility
.help = try adjusting the macro to put `{$vis}` inside the invocation
@@ -690,6 +700,8 @@ parse_single_colon_import_path = expected `::`, found `:`
parse_single_colon_struct_type = found single colon in a struct field type path
.suggestion = write a path separator here
+parse_static_with_generics = static items may not have generic parameters
+
parse_struct_literal_body_without_path =
struct literal body without path
.suggestion = you might have forgotten to add the struct literal inside the block
@@ -722,6 +734,10 @@ parse_sugg_wrap_pattern_in_parens = wrap the pattern in parentheses
parse_switch_mut_let_order =
switch the order of `mut` and `let`
+
+parse_ternary_operator = Rust has no ternary operator
+ .help = use an `if-else` expression instead
+
parse_tilde_const_lifetime = `~const` may only modify trait bounds, not lifetime bounds
parse_tilde_is_not_unary_operator = `~` cannot be used as a unary operator
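Illustrative sketch (not part of the patch): the kind of input the `parse_ternary_operator` message added above targets, next to the `if-else` rewrite its help text suggests.

fn main() {
    let cond = true;
    // let x = cond ? 1 : 2;          // C-style ternary: the parser now recovers from
    //                                // this and emits the message above
    let x = if cond { 1 } else { 2 }; // the spelling the help text suggests
    println!("{x}");
}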
@@ -847,6 +863,12 @@ parse_visibility_not_followed_by_item = visibility `{$vis}` is not followed by a
.label = the visibility
.help = you likely meant to define an item, e.g., `{$vis} fn foo() {"{}"}`
+parse_where_clause_before_const_body = where clauses are not allowed before const item bodies
+ .label = unexpected where clause
+ .name_label = while parsing this const item
+ .body_label = the item body
+ .suggestion = move the body before the where clause
+
parse_where_clause_before_tuple_struct_body = where clauses are not allowed before tuple struct bodies
.label = unexpected where clause
.name_label = while parsing this tuple struct
diff --git a/compiler/rustc_parse/src/errors.rs b/compiler/rustc_parse/src/errors.rs
index 96e1c0e3c..e0b1e3678 100644
--- a/compiler/rustc_parse/src/errors.rs
+++ b/compiler/rustc_parse/src/errors.rs
@@ -365,6 +365,14 @@ pub(crate) enum IfExpressionMissingThenBlockSub {
AddThenBlock(#[primary_span] Span),
}
+#[derive(Diagnostic)]
+#[diag(parse_ternary_operator)]
+#[help]
+pub struct TernaryOperator {
+ #[primary_span]
+ pub span: Span,
+}
+
#[derive(Subdiagnostic)]
#[suggestion(parse_extra_if_in_let_else, applicability = "maybe-incorrect", code = "")]
pub(crate) struct IfExpressionLetSomeSub {
@@ -1427,6 +1435,13 @@ pub(crate) struct AsyncBlockIn2015 {
}
#[derive(Diagnostic)]
+#[diag(parse_async_move_block_in_2015)]
+pub(crate) struct AsyncMoveBlockIn2015 {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
#[diag(parse_self_argument_pointer)]
pub(crate) struct SelfArgumentPointer {
#[primary_span]
@@ -1801,6 +1816,12 @@ pub struct UnknownPrefix<'a> {
}
#[derive(Subdiagnostic)]
+#[note(parse_macro_expands_to_adt_field)]
+pub struct MacroExpandsToAdtField<'a> {
+ pub adt_ty: &'a str,
+}
+
+#[derive(Subdiagnostic)]
pub enum UnknownPrefixSugg {
#[suggestion(
parse_suggestion_br,
@@ -2615,21 +2636,24 @@ pub(crate) struct MissingPlusBounds {
}
#[derive(Diagnostic)]
-#[diag(parse_incorrect_braces_trait_bounds)]
-pub(crate) struct IncorrectBracesTraitBounds {
+#[diag(parse_incorrect_parens_trait_bounds)]
+pub(crate) struct IncorrectParensTraitBounds {
#[primary_span]
pub span: Vec<Span>,
#[subdiagnostic]
- pub sugg: IncorrectBracesTraitBoundsSugg,
+ pub sugg: IncorrectParensTraitBoundsSugg,
}
#[derive(Subdiagnostic)]
-#[multipart_suggestion(parse_suggestion, applicability = "machine-applicable")]
-pub(crate) struct IncorrectBracesTraitBoundsSugg {
+#[multipart_suggestion(
+ parse_incorrect_parens_trait_bounds_sugg,
+ applicability = "machine-applicable"
+)]
+pub(crate) struct IncorrectParensTraitBoundsSugg {
#[suggestion_part(code = " ")]
- pub l: Span,
- #[suggestion_part(code = "")]
- pub r: Span,
+ pub wrong_span: Span,
+ #[suggestion_part(code = "(")]
+ pub new_span: Span,
}
#[derive(Diagnostic)]
@@ -2692,3 +2716,48 @@ pub(crate) struct ExpectedBuiltinIdent {
#[primary_span]
pub span: Span,
}
+
+#[derive(Diagnostic)]
+#[diag(parse_static_with_generics)]
+pub(crate) struct StaticWithGenerics {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parse_where_clause_before_const_body)]
+pub(crate) struct WhereClauseBeforeConstBody {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(parse_name_label)]
+ pub name: Span,
+ #[label(parse_body_label)]
+ pub body: Span,
+ #[subdiagnostic]
+ pub sugg: Option<WhereClauseBeforeConstBodySugg>,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(parse_suggestion, applicability = "machine-applicable")]
+pub(crate) struct WhereClauseBeforeConstBodySugg {
+ #[suggestion_part(code = "= {snippet} ")]
+ pub left: Span,
+ pub snippet: String,
+ #[suggestion_part(code = "")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parse_generic_args_in_pat_require_turbofish_syntax)]
+pub(crate) struct GenericArgsInPatRequireTurbofishSyntax {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(
+ parse_sugg_turbofish_syntax,
+ style = "verbose",
+ code = "::",
+ applicability = "maybe-incorrect"
+ )]
+ pub suggest_turbofish: Span,
+}
diff --git a/compiler/rustc_parse/src/lexer/diagnostics.rs b/compiler/rustc_parse/src/lexer/diagnostics.rs
index 9e6d27bf0..b50bb47f2 100644
--- a/compiler/rustc_parse/src/lexer/diagnostics.rs
+++ b/compiler/rustc_parse/src/lexer/diagnostics.rs
@@ -46,7 +46,7 @@ pub fn report_missing_open_delim(
};
err.span_label(
unmatch_brace.found_span.shrink_to_lo(),
- format!("missing open `{}` for this delimiter", missed_open),
+ format!("missing open `{missed_open}` for this delimiter"),
);
reported_missing_open = true;
}
diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs
index c6e6b46e4..a375a1d69 100644
--- a/compiler/rustc_parse/src/lexer/mod.rs
+++ b/compiler/rustc_parse/src/lexer/mod.rs
@@ -9,8 +9,8 @@ use rustc_ast::tokenstream::TokenStream;
use rustc_ast::util::unicode::contains_text_flow_control_chars;
use rustc_errors::{error_code, Applicability, Diagnostic, DiagnosticBuilder, StashKey};
use rustc_lexer::unescape::{self, EscapeError, Mode};
-use rustc_lexer::Cursor;
use rustc_lexer::{Base, DocStyle, RawStrError};
+use rustc_lexer::{Cursor, LiteralKind};
use rustc_session::lint::builtin::{
RUST_2021_PREFIXES_INCOMPATIBLE_SYNTAX, TEXT_DIRECTION_CODEPOINT_IN_COMMENT,
};
@@ -74,7 +74,6 @@ pub(crate) fn parse_token_trees<'a>(
// because the delimiter mismatch is more likely to be the root cause of error
let mut buffer = Vec::with_capacity(1);
- // Not using `emit_unclosed_delims` to use `db.buffer`
for unmatched in unmatched_delims {
if let Some(err) = make_unclosed_delims_error(unmatched, &sess) {
err.buffer(&mut buffer);
@@ -118,6 +117,7 @@ impl<'a> StringReader<'a> {
let mut swallow_next_invalid = 0;
// Skip trivial (whitespace & comments) tokens
loop {
+ let str_before = self.cursor.as_str();
let token = self.cursor.advance_token();
let start = self.pos;
self.pos = self.pos + BytePos(token.len);
@@ -165,10 +165,7 @@ impl<'a> StringReader<'a> {
continue;
}
rustc_lexer::TokenKind::Ident => {
- let sym = nfc_normalize(self.str_from(start));
- let span = self.mk_sp(start, self.pos);
- self.sess.symbol_gallery.insert(sym, span);
- token::Ident(sym, false)
+ self.ident(start)
}
rustc_lexer::TokenKind::RawIdent => {
let sym = nfc_normalize(self.str_from(start + BytePos(2)));
@@ -182,10 +179,7 @@ impl<'a> StringReader<'a> {
}
rustc_lexer::TokenKind::UnknownPrefix => {
self.report_unknown_prefix(start);
- let sym = nfc_normalize(self.str_from(start));
- let span = self.mk_sp(start, self.pos);
- self.sess.symbol_gallery.insert(sym, span);
- token::Ident(sym, false)
+ self.ident(start)
}
rustc_lexer::TokenKind::InvalidIdent
// Do not recover an identifier with emoji if the codepoint is a confusable
@@ -203,6 +197,27 @@ impl<'a> StringReader<'a> {
.push(span);
token::Ident(sym, false)
}
+ // split up (raw) c string literals into an ident and a string literal when edition < 2021.
+ rustc_lexer::TokenKind::Literal {
+ kind: kind @ (LiteralKind::CStr { .. } | LiteralKind::RawCStr { .. }),
+ suffix_start: _,
+ } if !self.mk_sp(start, self.pos).edition().at_least_rust_2021() => {
+ let prefix_len = match kind {
+ LiteralKind::CStr { .. } => 1,
+ LiteralKind::RawCStr { .. } => 2,
+ _ => unreachable!(),
+ };
+
+ // reset the state so that only the prefix ("c" or "cr")
+ // was consumed.
+ let lit_start = start + BytePos(prefix_len);
+ self.pos = lit_start;
+ self.cursor = Cursor::new(&str_before[prefix_len as usize..]);
+
+ self.report_unknown_prefix(start);
+ let prefix_span = self.mk_sp(start, lit_start);
+ return (Token::new(self.ident(start), prefix_span), preceded_by_whitespace);
+ }
rustc_lexer::TokenKind::Literal { kind, suffix_start } => {
let suffix_start = start + BytePos(suffix_start);
let (kind, symbol) = self.cook_lexer_literal(start, suffix_start, kind);
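Illustrative sketch (not part of the patch; the macro below is hypothetical): why the pre-2021 rewind above matters. On editions before 2021, `c"hello"` keeps lexing as the identifier `c` followed by the string literal `"hello"` (modulo a prefix-compatibility lint), so token-matching macros like this continue to work.

macro_rules! ident_then_str {
    ($i:ident $s:literal) => {
        format!("prefix `{}`, literal {:?}", stringify!($i), $s)
    };
}

fn main() {
    // Two tokens on editions < 2021: the identifier `c` and the string `"hello"`.
    println!("{}", ident_then_str!(c"hello"));
}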
@@ -317,6 +332,13 @@ impl<'a> StringReader<'a> {
}
}
+ fn ident(&self, start: BytePos) -> TokenKind {
+ let sym = nfc_normalize(self.str_from(start));
+ let span = self.mk_sp(start, self.pos);
+ self.sess.symbol_gallery.insert(sym, span);
+ token::Ident(sym, false)
+ }
+
fn struct_fatal_span_char(
&self,
from_pos: BytePos,
diff --git a/compiler/rustc_parse/src/lexer/tokentrees.rs b/compiler/rustc_parse/src/lexer/tokentrees.rs
index 318a29985..07910113d 100644
--- a/compiler/rustc_parse/src/lexer/tokentrees.rs
+++ b/compiler/rustc_parse/src/lexer/tokentrees.rs
@@ -198,7 +198,7 @@ impl<'a> TokenTreesReader<'a> {
// An unexpected closing delimiter (i.e., there is no
// matching opening delimiter).
let token_str = token_to_string(&self.token);
- let msg = format!("unexpected closing delimiter: `{}`", token_str);
+ let msg = format!("unexpected closing delimiter: `{token_str}`");
let mut err = self.string_reader.sess.span_diagnostic.struct_span_err(self.token.span, msg);
report_suspicious_mismatch_block(
diff --git a/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
index 461a34b67..b659c40b2 100644
--- a/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
+++ b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
@@ -27,7 +27,7 @@ pub(crate) fn emit_unescape_error(
lit, span_with_quotes, mode, range, error
);
let last_char = || {
- let c = lit[range.clone()].chars().rev().next().unwrap();
+ let c = lit[range.clone()].chars().next_back().unwrap();
let span = span.with_lo(span.hi() - BytePos(c.len_utf8() as u32));
(c, span)
};
@@ -80,20 +80,14 @@ pub(crate) fn emit_unescape_error(
let sugg = sugg.unwrap_or_else(|| {
let prefix = mode.prefix_noraw();
let mut escaped = String::with_capacity(lit.len());
- let mut chrs = lit.chars().peekable();
- while let Some(first) = chrs.next() {
- match (first, chrs.peek()) {
- ('\\', Some('"')) => {
- escaped.push('\\');
- escaped.push('"');
- chrs.next();
- }
- ('"', _) => {
- escaped.push('\\');
- escaped.push('"')
- }
- (c, _) => escaped.push(c),
- };
+ let mut in_escape = false;
+ for c in lit.chars() {
+ match c {
+ '\\' => in_escape = !in_escape,
+ '"' if !in_escape => escaped.push('\\'),
+ _ => in_escape = false,
+ }
+ escaped.push(c);
}
let sugg = format!("{prefix}\"{escaped}\"");
MoreThanOneCharSugg::Quotes {
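Illustrative sketch (not part of the patch): the new escaping loop above, extracted into a hypothetical free function to show its effect on a couple of inputs.

fn escape_quotes(lit: &str) -> String {
    let mut escaped = String::with_capacity(lit.len());
    let mut in_escape = false;
    for c in lit.chars() {
        match c {
            '\\' => in_escape = !in_escape,
            '"' if !in_escape => escaped.push('\\'),
            _ => in_escape = false,
        }
        escaped.push(c);
    }
    escaped
}

fn main() {
    assert_eq!(escape_quotes(r#"a"b"#), r#"a\"b"#);  // bare quote gets escaped
    assert_eq!(escape_quotes(r#"a\"b"#), r#"a\"b"#); // already-escaped quote is left alone
    println!("ok");
}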
@@ -135,7 +129,7 @@ pub(crate) fn emit_unescape_error(
"unknown character escape"
};
let ec = escaped_char(c);
- let mut diag = handler.struct_span_err(span, format!("{}: `{}`", label, ec));
+ let mut diag = handler.struct_span_err(span, format!("{label}: `{ec}`"));
diag.span_label(span, label);
if c == '{' || c == '}' && matches!(mode, Mode::Str | Mode::RawStr) {
diag.help(
@@ -151,7 +145,7 @@ pub(crate) fn emit_unescape_error(
diag.span_suggestion(
span_with_quotes,
"if you meant to write a literal backslash (perhaps escaping in a regular expression), consider a raw string literal",
- format!("r\"{}\"", lit),
+ format!("r\"{lit}\""),
Applicability::MaybeIncorrect,
);
}
@@ -180,21 +174,20 @@ pub(crate) fn emit_unescape_error(
Mode::RawByteStr => "raw byte string literal",
_ => panic!("non-is_byte literal paired with NonAsciiCharInByte"),
};
- let mut err = handler.struct_span_err(span, format!("non-ASCII character in {}", desc));
+ let mut err = handler.struct_span_err(span, format!("non-ASCII character in {desc}"));
let postfix = if unicode_width::UnicodeWidthChar::width(c).unwrap_or(1) == 0 {
- format!(" but is {:?}", c)
+ format!(" but is {c:?}")
} else {
String::new()
};
- err.span_label(span, format!("must be ASCII{}", postfix));
+ err.span_label(span, format!("must be ASCII{postfix}"));
// Note: the \\xHH suggestions are not given for raw byte string
// literals, because they are raw and so cannot use any escapes.
if (c as u32) <= 0xFF && mode != Mode::RawByteStr {
err.span_suggestion(
span,
format!(
- "if you meant to use the unicode code point for {:?}, use a \\xHH escape",
- c
+ "if you meant to use the unicode code point for {c:?}, use a \\xHH escape"
),
format!("\\x{:X}", c as u32),
Applicability::MaybeIncorrect,
@@ -206,7 +199,7 @@ pub(crate) fn emit_unescape_error(
utf8.push(c);
err.span_suggestion(
span,
- format!("if you meant to use the UTF-8 encoding of {:?}, use \\xHH escapes", c),
+ format!("if you meant to use the UTF-8 encoding of {c:?}, use \\xHH escapes"),
utf8.as_bytes()
.iter()
.map(|b: &u8| format!("\\x{:X}", *b))
diff --git a/compiler/rustc_parse/src/lexer/unicode_chars.rs b/compiler/rustc_parse/src/lexer/unicode_chars.rs
index 829d9693e..bbfb160eb 100644
--- a/compiler/rustc_parse/src/lexer/unicode_chars.rs
+++ b/compiler/rustc_parse/src/lexer/unicode_chars.rs
@@ -349,7 +349,7 @@ pub(super) fn check_for_substitution(
let span = Span::with_root_ctxt(pos, pos + Pos::from_usize(ch.len_utf8() * count));
let Some((_, ascii_name, token)) = ASCII_ARRAY.iter().find(|&&(s, _, _)| s == ascii_str) else {
- let msg = format!("substitution character not found for '{}'", ch);
+ let msg = format!("substitution character not found for '{ch}'");
reader.sess.span_diagnostic.span_bug_no_panic(span, msg);
return (None, None);
};
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index 25de78085..892be36aa 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -8,6 +8,7 @@
#![feature(never_type)]
#![feature(rustc_attrs)]
#![recursion_limit = "256"]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate tracing;
@@ -205,7 +206,7 @@ pub fn stream_to_parser<'a>(
stream: TokenStream,
subparser_name: Option<&'static str>,
) -> Parser<'a> {
- Parser::new(sess, stream, false, subparser_name)
+ Parser::new(sess, stream, subparser_name)
}
/// Runs the given subparser `f` on the tokens of the given `attr`'s item.
@@ -215,7 +216,7 @@ pub fn parse_in<'a, T>(
name: &'static str,
mut f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
) -> PResult<'a, T> {
- let mut parser = Parser::new(sess, tts, false, Some(name));
+ let mut parser = Parser::new(sess, tts, Some(name));
let result = f(&mut parser)?;
if parser.token != token::Eof {
parser.unexpected()?;
@@ -247,7 +248,7 @@ pub fn parse_cfg_attr(
match parse_in(parse_sess, tokens.clone(), "`cfg_attr` input", |p| p.parse_cfg_attr()) {
Ok(r) => return Some(r),
Err(mut e) => {
- e.help(format!("the valid syntax is `{}`", CFG_ATTR_GRAMMAR_HELP))
+ e.help(format!("the valid syntax is `{CFG_ATTR_GRAMMAR_HELP}`"))
.note(CFG_ATTR_NOTE_REF)
.emit();
}
diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs
index ee0abba1c..104de47b9 100644
--- a/compiler/rustc_parse/src/parser/attr.rs
+++ b/compiler/rustc_parse/src/parser/attr.rs
@@ -36,7 +36,7 @@ impl<'a> Parser<'a> {
pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, AttrWrapper> {
let mut outer_attrs = ast::AttrVec::new();
let mut just_parsed_doc_comment = false;
- let start_pos = self.token_cursor.num_next_calls;
+ let start_pos = self.num_bump_calls;
loop {
let attr = if self.check(&token::Pound) {
let prev_outer_attr_sp = outer_attrs.last().map(|attr| attr.span);
@@ -277,7 +277,7 @@ impl<'a> Parser<'a> {
pub(crate) fn parse_inner_attributes(&mut self) -> PResult<'a, ast::AttrVec> {
let mut attrs = ast::AttrVec::new();
loop {
- let start_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
+ let start_pos: u32 = self.num_bump_calls.try_into().unwrap();
// Only try to parse if it is an inner attribute (has `!`).
let attr = if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) {
Some(self.parse_attribute(InnerAttrPolicy::Permitted)?)
@@ -298,7 +298,7 @@ impl<'a> Parser<'a> {
None
};
if let Some(attr) = attr {
- let end_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
+ let end_pos: u32 = self.num_bump_calls.try_into().unwrap();
// If we are currently capturing tokens, mark the location of this inner attribute.
// If capturing ends up creating a `LazyAttrTokenStream`, we will include
// this replace range with it, removing the inner attribute from the final
diff --git a/compiler/rustc_parse/src/parser/attr_wrapper.rs b/compiler/rustc_parse/src/parser/attr_wrapper.rs
index b579da098..5d6c574ba 100644
--- a/compiler/rustc_parse/src/parser/attr_wrapper.rs
+++ b/compiler/rustc_parse/src/parser/attr_wrapper.rs
@@ -107,7 +107,7 @@ impl ToAttrTokenStream for LazyAttrTokenStreamImpl {
let tokens =
std::iter::once((FlatToken::Token(self.start_token.0.clone()), self.start_token.1))
.chain((0..self.num_calls).map(|_| {
- let token = cursor_snapshot.next(cursor_snapshot.desugar_doc_comments);
+ let token = cursor_snapshot.next();
(FlatToken::Token(token.0), token.1)
}))
.take(self.num_calls);
@@ -145,13 +145,11 @@ impl ToAttrTokenStream for LazyAttrTokenStreamImpl {
// another replace range will capture the *replaced* tokens for the inner
// range, not the original tokens.
for (range, new_tokens) in replace_ranges.into_iter().rev() {
- assert!(!range.is_empty(), "Cannot replace an empty range: {:?}", range);
+ assert!(!range.is_empty(), "Cannot replace an empty range: {range:?}");
// Replace ranges are only allowed to decrease the number of tokens.
assert!(
range.len() >= new_tokens.len(),
- "Range {:?} has greater len than {:?}",
- range,
- new_tokens
+ "Range {range:?} has greater len than {new_tokens:?}"
);
// Replace any removed tokens with `FlatToken::Empty`.
@@ -215,6 +213,7 @@ impl<'a> Parser<'a> {
let start_token = (self.token.clone(), self.token_spacing);
let cursor_snapshot = self.token_cursor.clone();
+ let start_pos = self.num_bump_calls;
let has_outer_attrs = !attrs.attrs.is_empty();
let prev_capturing = std::mem::replace(&mut self.capture_state.capturing, Capturing::Yes);
@@ -275,8 +274,7 @@ impl<'a> Parser<'a> {
let replace_ranges_end = self.capture_state.replace_ranges.len();
- let cursor_snapshot_next_calls = cursor_snapshot.num_next_calls;
- let mut end_pos = self.token_cursor.num_next_calls;
+ let mut end_pos = self.num_bump_calls;
let mut captured_trailing = false;
@@ -303,12 +301,12 @@ impl<'a> Parser<'a> {
// then extend the range of captured tokens to include it, since the parser
// was not actually bumped past it. When the `LazyAttrTokenStream` gets converted
// into an `AttrTokenStream`, we will create the proper token.
- if self.token_cursor.break_last_token {
+ if self.break_last_token {
assert!(!captured_trailing, "Cannot set break_last_token and have trailing token");
end_pos += 1;
}
- let num_calls = end_pos - cursor_snapshot_next_calls;
+ let num_calls = end_pos - start_pos;
// If we have no attributes, then we will never need to
// use any replace ranges.
@@ -318,7 +316,7 @@ impl<'a> Parser<'a> {
// Grab any replace ranges that occur *inside* the current AST node.
// We will perform the actual replacement when we convert the `LazyAttrTokenStream`
// to an `AttrTokenStream`.
- let start_calls: u32 = cursor_snapshot_next_calls.try_into().unwrap();
+ let start_calls: u32 = start_pos.try_into().unwrap();
self.capture_state.replace_ranges[replace_ranges_start..replace_ranges_end]
.iter()
.cloned()
@@ -333,7 +331,7 @@ impl<'a> Parser<'a> {
start_token,
num_calls,
cursor_snapshot,
- break_last_token: self.token_cursor.break_last_token,
+ break_last_token: self.break_last_token,
replace_ranges,
});
@@ -361,14 +359,10 @@ impl<'a> Parser<'a> {
// with a `FlatToken::AttrTarget`. If this AST node is inside an item
// that has `#[derive]`, then this will allow us to cfg-expand this
// AST node.
- let start_pos =
- if has_outer_attrs { attrs.start_pos } else { cursor_snapshot_next_calls };
+ let start_pos = if has_outer_attrs { attrs.start_pos } else { start_pos };
let new_tokens = vec![(FlatToken::AttrTarget(attr_data), Spacing::Alone)];
- assert!(
- !self.token_cursor.break_last_token,
- "Should not have unglued last token with cfg attr"
- );
+ assert!(!self.break_last_token, "Should not have unglued last token with cfg attr");
let range: Range<u32> = (start_pos.try_into().unwrap())..(end_pos.try_into().unwrap());
self.capture_state.replace_ranges.push((range, new_tokens));
self.capture_state.replace_ranges.extend(inner_attr_replace_ranges);
@@ -409,22 +403,19 @@ fn make_token_stream(
FlatToken::Token(Token { kind: TokenKind::CloseDelim(delim), span }) => {
let frame_data = stack
.pop()
- .unwrap_or_else(|| panic!("Token stack was empty for token: {:?}", token));
+ .unwrap_or_else(|| panic!("Token stack was empty for token: {token:?}"));
let (open_delim, open_sp) = frame_data.open_delim_sp.unwrap();
assert_eq!(
open_delim, delim,
- "Mismatched open/close delims: open={:?} close={:?}",
- open_delim, span
+ "Mismatched open/close delims: open={open_delim:?} close={span:?}"
);
let dspan = DelimSpan::from_pair(open_sp, span);
let stream = AttrTokenStream::new(frame_data.inner);
let delimited = AttrTokenTree::Delimited(dspan, delim, stream);
stack
.last_mut()
- .unwrap_or_else(|| {
- panic!("Bottom token frame is missing for token: {:?}", token)
- })
+ .unwrap_or_else(|| panic!("Bottom token frame is missing for token: {token:?}"))
.inner
.push(delimited);
}
@@ -456,7 +447,7 @@ fn make_token_stream(
.inner
.push(AttrTokenTree::Token(Token::new(unglued_first, first_span), spacing));
} else {
- panic!("Unexpected last token {:?}", last_token)
+ panic!("Unexpected last token {last_token:?}")
}
}
AttrTokenStream::new(final_buf.inner)
@@ -469,6 +460,6 @@ mod size_asserts {
use rustc_data_structures::static_assert_size;
// tidy-alphabetical-start
static_assert_size!(AttrWrapper, 16);
- static_assert_size!(LazyAttrTokenStreamImpl, 120);
+ static_assert_size!(LazyAttrTokenStreamImpl, 104);
// tidy-alphabetical-end
}
diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs
index 0ce6a570d..6c8ef3406 100644
--- a/compiler/rustc_parse/src/parser/diagnostics.rs
+++ b/compiler/rustc_parse/src/parser/diagnostics.rs
@@ -4,17 +4,18 @@ use super::{
TokenExpectType, TokenType,
};
use crate::errors::{
- AmbiguousPlus, AttributeOnParamType, BadQPathStage2, BadTypePlus, BadTypePlusSub, ColonAsSemi,
- ComparisonOperatorsCannotBeChained, ComparisonOperatorsCannotBeChainedSugg,
- ConstGenericWithoutBraces, ConstGenericWithoutBracesSugg, DocCommentDoesNotDocumentAnything,
- DocCommentOnParamType, DoubleColonInBound, ExpectedIdentifier, ExpectedSemi, ExpectedSemiSugg,
+ AmbiguousPlus, AsyncMoveBlockIn2015, AttributeOnParamType, BadQPathStage2, BadTypePlus,
+ BadTypePlusSub, ColonAsSemi, ComparisonOperatorsCannotBeChained,
+ ComparisonOperatorsCannotBeChainedSugg, ConstGenericWithoutBraces,
+ ConstGenericWithoutBracesSugg, DocCommentDoesNotDocumentAnything, DocCommentOnParamType,
+ DoubleColonInBound, ExpectedIdentifier, ExpectedSemi, ExpectedSemiSugg,
GenericParamsWithoutAngleBrackets, GenericParamsWithoutAngleBracketsSugg,
HelpIdentifierStartsWithNumber, InInTypo, IncorrectAwait, IncorrectSemicolon,
IncorrectUseOfAwait, ParenthesesInForHead, ParenthesesInForHeadSugg,
PatternMethodParamWithoutBody, QuestionMarkInType, QuestionMarkInTypeSugg, SelfParamNotFirst,
StructLiteralBodyWithoutPath, StructLiteralBodyWithoutPathSugg, StructLiteralNeedingParens,
StructLiteralNeedingParensSugg, SuggAddMissingLetStmt, SuggEscapeIdentifier, SuggRemoveComma,
- UnexpectedConstInGenericParam, UnexpectedConstParamDeclaration,
+ TernaryOperator, UnexpectedConstInGenericParam, UnexpectedConstParamDeclaration,
UnexpectedConstParamDeclarationSugg, UnmatchedAngleBrackets, UseEqInstead,
};
@@ -247,7 +248,7 @@ impl<'a> Parser<'a> {
self.sess.span_diagnostic.struct_span_err(sp, m)
}
- pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, m: impl Into<DiagnosticMessage>) -> ! {
+ pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, m: impl Into<String>) -> ! {
self.sess.span_diagnostic.span_bug(sp, m)
}
@@ -500,6 +501,10 @@ impl<'a> Parser<'a> {
// Special-case "expected `;`" errors
if expected.contains(&TokenType::Token(token::Semi)) {
+ if self.prev_token == token::Question && self.maybe_recover_from_ternary_operator() {
+ return Ok(true);
+ }
+
if self.token.span == DUMMY_SP || self.prev_token.span == DUMMY_SP {
// Likely inside a macro, can't provide meaningful suggestions.
} else if !sm.is_multiline(self.prev_token.span.until(self.token.span)) {
@@ -569,6 +574,12 @@ impl<'a> Parser<'a> {
return Err(self.sess.create_err(UseEqInstead { span: self.token.span }));
}
+ if self.token.is_keyword(kw::Move) && self.prev_token.is_keyword(kw::Async) {
+ // The 2015 edition is in use because parsing of `async move` has failed.
+ let span = self.prev_token.span.to(self.token.span);
+ return Err(self.sess.create_err(AsyncMoveBlockIn2015 { span }));
+ }
+
let expect = tokens_to_string(&expected);
let actual = super::token_descr(&self.token);
let (msg_exp, (label_sp, label_exp)) = if expected.len() > 1 {
@@ -608,13 +619,13 @@ impl<'a> Parser<'a> {
if let TokenKind::Ident(prev, _) = &self.prev_token.kind
&& let TokenKind::Ident(cur, _) = &self.token.kind
{
- let concat = Symbol::intern(&format!("{}{}", prev, cur));
+ let concat = Symbol::intern(&format!("{prev}{cur}"));
let ident = Ident::new(concat, DUMMY_SP);
if ident.is_used_keyword() || ident.is_reserved() || ident.is_raw_guess() {
let span = self.prev_token.span.to(self.token.span);
err.span_suggestion_verbose(
span,
- format!("consider removing the space to spell keyword `{}`", concat),
+ format!("consider removing the space to spell keyword `{concat}`"),
concat,
Applicability::MachineApplicable,
);
@@ -1330,6 +1341,45 @@ impl<'a> Parser<'a> {
}
}
+ /// Rust has no ternary operator (`cond ? then : else`). Parse it and try
+ /// to recover from it if `then` and `else` are valid expressions. Returns
+ /// whether it was a ternary operator.
+ pub(super) fn maybe_recover_from_ternary_operator(&mut self) -> bool {
+ if self.prev_token != token::Question {
+ return false;
+ }
+
+ let lo = self.prev_token.span.lo();
+ let snapshot = self.create_snapshot_for_diagnostic();
+
+ if match self.parse_expr() {
+ Ok(_) => true,
+ Err(err) => {
+ err.cancel();
+ // The colon can sometimes be mistaken for type
+ // ascription. Catch when this happens and continue.
+ self.token == token::Colon
+ }
+ } {
+ if self.eat_noexpect(&token::Colon) {
+ match self.parse_expr() {
+ Ok(_) => {
+ self.sess.emit_err(TernaryOperator { span: self.token.span.with_lo(lo) });
+ return true;
+ }
+ Err(err) => {
+ err.cancel();
+ self.restore_snapshot(snapshot);
+ }
+ };
+ }
+ } else {
+ self.restore_snapshot(snapshot);
+ };
+
+ false
+ }
+
pub(super) fn maybe_recover_from_bad_type_plus(&mut self, ty: &Ty) -> PResult<'a, ()> {
// Do not add `+` to expected tokens.
if !self.token.is_like_plus() {
@@ -1434,8 +1484,9 @@ impl<'a> Parser<'a> {
self.inc_dec_standalone_suggest(kind, spans).emit_verbose(&mut err)
}
IsStandalone::Subexpr => {
- let Ok(base_src) = self.span_to_snippet(base.span)
- else { return help_base_case(err, base) };
+ let Ok(base_src) = self.span_to_snippet(base.span) else {
+ return help_base_case(err, base);
+ };
match kind.fixity {
UnaryFixity::Pre => {
self.prefix_inc_dec_suggest(base_src, kind, spans).emit(&mut err)
@@ -1666,13 +1717,7 @@ impl<'a> Parser<'a> {
self.recover_await_prefix(await_sp)?
};
let sp = self.error_on_incorrect_await(lo, hi, &expr, is_question);
- let kind = match expr.kind {
- // Avoid knock-down errors as we don't know whether to interpret this as `foo().await?`
- // or `foo()?.await` (the very reason we went with postfix syntax 😅).
- ExprKind::Try(_) => ExprKind::Err,
- _ => ExprKind::Await(expr, await_sp),
- };
- let expr = self.mk_expr(lo.to(sp), kind);
+ let expr = self.mk_expr(lo.to(sp), ExprKind::Err);
self.maybe_recover_from_bad_qpath(expr)
}
@@ -2056,7 +2101,7 @@ impl<'a> Parser<'a> {
}
pub(super) fn recover_arg_parse(&mut self) -> PResult<'a, (P<ast::Pat>, P<ast::Ty>)> {
- let pat = self.parse_pat_no_top_alt(Some(Expected::ArgumentName))?;
+ let pat = self.parse_pat_no_top_alt(Some(Expected::ArgumentName), None)?;
self.expect(&token::Colon)?;
let ty = self.parse_ty()?;
@@ -2110,7 +2155,7 @@ impl<'a> Parser<'a> {
}
_ => (
self.token.span,
- format!("expected expression, found {}", super::token_descr(&self.token),),
+ format!("expected expression, found {}", super::token_descr(&self.token)),
),
};
let mut err = self.struct_span_err(span, msg);
@@ -2464,7 +2509,7 @@ impl<'a> Parser<'a> {
// Skip the `:`.
snapshot_pat.bump();
snapshot_type.bump();
- match snapshot_pat.parse_pat_no_top_alt(expected) {
+ match snapshot_pat.parse_pat_no_top_alt(expected, None) {
Err(inner_err) => {
inner_err.cancel();
}
@@ -2590,6 +2635,7 @@ impl<'a> Parser<'a> {
pub(crate) fn maybe_recover_unexpected_comma(
&mut self,
lo: Span,
+ is_mac_invoc: bool,
rt: CommaRecoveryMode,
) -> PResult<'a, ()> {
if self.token != token::Comma {
@@ -2610,24 +2656,28 @@ impl<'a> Parser<'a> {
let seq_span = lo.to(self.prev_token.span);
let mut err = self.struct_span_err(comma_span, "unexpected `,` in pattern");
if let Ok(seq_snippet) = self.span_to_snippet(seq_span) {
- err.multipart_suggestion(
- format!(
- "try adding parentheses to match on a tuple{}",
- if let CommaRecoveryMode::LikelyTuple = rt { "" } else { "..." },
- ),
- vec![
- (seq_span.shrink_to_lo(), "(".to_string()),
- (seq_span.shrink_to_hi(), ")".to_string()),
- ],
- Applicability::MachineApplicable,
- );
- if let CommaRecoveryMode::EitherTupleOrPipe = rt {
- err.span_suggestion(
- seq_span,
- "...or a vertical bar to match on multiple alternatives",
- seq_snippet.replace(',', " |"),
+ if is_mac_invoc {
+ err.note(fluent::parse_macro_expands_to_match_arm);
+ } else {
+ err.multipart_suggestion(
+ format!(
+ "try adding parentheses to match on a tuple{}",
+ if let CommaRecoveryMode::LikelyTuple = rt { "" } else { "..." },
+ ),
+ vec![
+ (seq_span.shrink_to_lo(), "(".to_string()),
+ (seq_span.shrink_to_hi(), ")".to_string()),
+ ],
Applicability::MachineApplicable,
);
+ if let CommaRecoveryMode::EitherTupleOrPipe = rt {
+ err.span_suggestion(
+ seq_span,
+ "...or a vertical bar to match on multiple alternatives",
+ seq_snippet.replace(',', " |"),
+ Applicability::MachineApplicable,
+ );
+ }
}
}
Err(err)
@@ -2728,7 +2778,7 @@ impl<'a> Parser<'a> {
/// sequence of patterns until `)` is reached.
fn skip_pat_list(&mut self) -> PResult<'a, ()> {
while !self.check(&token::CloseDelim(Delimiter::Parenthesis)) {
- self.parse_pat_no_top_alt(None)?;
+ self.parse_pat_no_top_alt(None, None)?;
if !self.eat(&token::Comma) {
return Ok(());
}
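For illustration only (not in the patch): the new `maybe_recover_from_ternary_operator` path and the `TernaryOperator` error turn C-style ternary input into a single dedicated diagnostic instead of a cascade of "expected `;`" messages. A sketch of the input and the Rust the suggestion points toward, assuming a toolchain that includes this change:

    // Deliberately invalid input the recovery targets, kept in a comment so
    // this snippet itself compiles:
    //
    //     let y = x ? 1 : 0;   // now reported as "Rust has no ternary operator"
    //
    // The accepted equivalent:
    fn main() {
        let x = true;
        let y = if x { 1 } else { 0 };
        assert_eq!(y, 1);
    }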
diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs
index 7ede4fbc3..9ae3ef617 100644
--- a/compiler/rustc_parse/src/parser/expr.rs
+++ b/compiler/rustc_parse/src/parser/expr.rs
@@ -22,6 +22,7 @@ use rustc_ast::{AnonConst, BinOp, BinOpKind, FnDecl, FnRetTy, MacCall, Param, Ty
use rustc_ast::{Arm, Async, BlockCheckMode, Expr, ExprKind, Label, Movability, RangeLimits};
use rustc_ast::{ClosureBinder, MetaItemLit, StmtKind};
use rustc_ast_pretty::pprust;
+use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_errors::{
AddToDiagnostic, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, IntoDiagnostic,
PResult, StashKey,
@@ -193,13 +194,7 @@ impl<'a> Parser<'a> {
self.expected_tokens.push(TokenType::Operator);
while let Some(op) = self.check_assoc_op() {
- // Adjust the span for interpolated LHS to point to the `$lhs` token
- // and not to what it refers to.
- let lhs_span = match self.prev_token.kind {
- TokenKind::Interpolated(..) => self.prev_token.span,
- _ => lhs.span,
- };
-
+ let lhs_span = self.interpolated_or_expr_span(&lhs);
let cur_op_span = self.token.span;
let restrictions = if op.node.is_assign_like() {
self.restrictions & Restrictions::NO_STRUCT_LITERAL
@@ -238,7 +233,7 @@ impl<'a> Parser<'a> {
_ => unreachable!(),
}
.into();
- let invalid = format!("{}=", &sugg);
+ let invalid = format!("{sugg}=");
self.sess.emit_err(errors::InvalidComparisonOperator {
span: sp,
invalid: invalid.clone(),
@@ -626,8 +621,8 @@ impl<'a> Parser<'a> {
fn parse_expr_prefix_common(&mut self, lo: Span) -> PResult<'a, (Span, P<Expr>)> {
self.bump();
- let expr = self.parse_expr_prefix(None);
- let (span, expr) = self.interpolated_or_expr_span(expr)?;
+ let expr = self.parse_expr_prefix(None)?;
+ let span = self.interpolated_or_expr_span(&expr);
Ok((lo.to(span), expr))
}
@@ -702,20 +697,12 @@ impl<'a> Parser<'a> {
self.parse_expr_unary(lo, UnOp::Not)
}
- /// Returns the span of expr, if it was not interpolated or the span of the interpolated token.
- fn interpolated_or_expr_span(
- &self,
- expr: PResult<'a, P<Expr>>,
- ) -> PResult<'a, (Span, P<Expr>)> {
- expr.map(|e| {
- (
- match self.prev_token.kind {
- TokenKind::Interpolated(..) => self.prev_token.span,
- _ => e.span,
- },
- e,
- )
- })
+ /// Returns the span of expr if it was not interpolated, or the span of the interpolated token.
+ fn interpolated_or_expr_span(&self, expr: &Expr) -> Span {
+ match self.prev_token.kind {
+ TokenKind::Interpolated(..) => self.prev_token.span,
+ _ => expr.span,
+ }
}
fn parse_assoc_op_cast(
@@ -857,7 +844,7 @@ impl<'a> Parser<'a> {
let msg = format!(
"cast cannot be followed by {}",
match with_postfix.kind {
- ExprKind::Index(_, _) => "indexing",
+ ExprKind::Index(..) => "indexing",
ExprKind::Try(_) => "`?`",
ExprKind::Field(_, _) => "a field access",
ExprKind::MethodCall(_) => "a method call",
@@ -898,8 +885,8 @@ impl<'a> Parser<'a> {
self.parse_expr_prefix_range(None)
} else {
self.parse_expr_prefix(None)
- };
- let (hi, expr) = self.interpolated_or_expr_span(expr)?;
+ }?;
+ let hi = self.interpolated_or_expr_span(&expr);
let span = lo.to(hi);
if let Some(lt) = lifetime {
self.error_remove_borrow_lifetime(span, lt.ident.span);
@@ -930,8 +917,8 @@ impl<'a> Parser<'a> {
fn parse_expr_dot_or_call(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
let attrs = self.parse_or_use_outer_attributes(attrs)?;
self.collect_tokens_for_expr(attrs, |this, attrs| {
- let base = this.parse_expr_bottom();
- let (span, base) = this.interpolated_or_expr_span(base)?;
+ let base = this.parse_expr_bottom()?;
+ let span = this.interpolated_or_expr_span(&base);
this.parse_expr_dot_or_call_with(base, span, attrs)
})
}
@@ -1052,7 +1039,7 @@ impl<'a> Parser<'a> {
}
components.push(Punct(c));
} else {
- panic!("unexpected character in a float token: {:?}", c)
+ panic!("unexpected character in a float token: {c:?}")
}
}
if !ident_like.is_empty() {
@@ -1113,7 +1100,7 @@ impl<'a> Parser<'a> {
self.error_unexpected_after_dot();
DestructuredFloat::Error
}
- _ => panic!("unexpected components in a float token: {:?}", components),
+ _ => panic!("unexpected components in a float token: {components:?}"),
}
}
@@ -1167,7 +1154,7 @@ impl<'a> Parser<'a> {
DestructuredFloat::TrailingDot(sym, sym_span, dot_span) => {
assert!(suffix.is_none());
// Analogous to `Self::break_and_eat`
- self.token_cursor.break_last_token = true;
+ self.break_last_token = true;
// This might work, in cases like `1. 2`, and might not,
// in cases like `offset_of!(Ty, 1.)`. It depends on what comes
// after the float-like token, and therefore we have to make
@@ -1304,12 +1291,15 @@ impl<'a> Parser<'a> {
let index = self.parse_expr()?;
self.suggest_missing_semicolon_before_array(prev_span, open_delim_span)?;
self.expect(&token::CloseDelim(Delimiter::Bracket))?;
- Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_index(base, index)))
+ Ok(self.mk_expr(
+ lo.to(self.prev_token.span),
+ self.mk_index(base, index, open_delim_span.to(self.prev_token.span)),
+ ))
}
/// Assuming we have just parsed `.`, continue parsing into an expression.
fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
- if self.token.uninterpolated_span().rust_2018() && self.eat_keyword(kw::Await) {
+ if self.token.uninterpolated_span().at_least_rust_2018() && self.eat_keyword(kw::Await) {
return Ok(self.mk_await_expr(self_arg, lo));
}
@@ -1442,8 +1432,8 @@ impl<'a> Parser<'a> {
self.parse_expr_let()
} else if self.eat_keyword(kw::Underscore) {
Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore))
- } else if self.token.uninterpolated_span().rust_2018() {
- // `Span::rust_2018()` is somewhat expensive; don't get it repeatedly.
+ } else if self.token.uninterpolated_span().at_least_rust_2018() {
+            // `Span::at_least_rust_2018()` is somewhat expensive; don't get it repeatedly.
if self.check_keyword(kw::Async) {
if self.is_async_block() {
// Check for `async {` and `async move {`.
@@ -2230,7 +2220,7 @@ impl<'a> Parser<'a> {
let movability =
if self.eat_keyword(kw::Static) { Movability::Static } else { Movability::Movable };
- let asyncness = if self.token.uninterpolated_span().rust_2018() {
+ let asyncness = if self.token.uninterpolated_span().at_least_rust_2018() {
self.parse_asyncness(Case::Sensitive)
} else {
Async::No
@@ -2338,11 +2328,11 @@ impl<'a> Parser<'a> {
let lo = self.token.span;
let attrs = self.parse_outer_attributes()?;
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
- let pat = this.parse_pat_no_top_alt(Some(Expected::ParameterName))?;
+ let pat = this.parse_pat_no_top_alt(Some(Expected::ParameterName), None)?;
let ty = if this.eat(&token::Colon) {
this.parse_ty()?
} else {
- this.mk_ty(this.prev_token.span, TyKind::Infer)
+ this.mk_ty(pat.span, TyKind::Infer)
};
Ok((
@@ -2500,7 +2490,7 @@ impl<'a> Parser<'a> {
let else_span = self.prev_token.span; // `else`
let attrs = self.parse_outer_attributes()?; // For recovery.
let expr = if self.eat_keyword(kw::If) {
- self.parse_expr_if()?
+ ensure_sufficient_stack(|| self.parse_expr_if())?
} else if self.check(&TokenKind::OpenDelim(Delimiter::Brace)) {
self.parse_simple_block()?
} else {
@@ -2599,7 +2589,7 @@ impl<'a> Parser<'a> {
// Recover from missing expression in `for` loop
if matches!(expr.kind, ExprKind::Block(..))
- && !matches!(self.token.kind, token::OpenDelim(token::Delimiter::Brace))
+ && !matches!(self.token.kind, token::OpenDelim(Delimiter::Brace))
&& self.may_recover()
{
self.sess
@@ -2781,7 +2771,7 @@ impl<'a> Parser<'a> {
return None;
}
let pre_pat_snapshot = self.create_snapshot_for_diagnostic();
- match self.parse_pat_no_top_alt(None) {
+ match self.parse_pat_no_top_alt(None, None) {
Ok(_pat) => {
if self.token.kind == token::FatArrow {
// Reached arm end.
@@ -3003,7 +2993,8 @@ impl<'a> Parser<'a> {
fn is_do_catch_block(&self) -> bool {
self.token.is_keyword(kw::Do)
&& self.is_keyword_ahead(1, &[kw::Catch])
- && self.look_ahead(2, |t| *t == token::OpenDelim(Delimiter::Brace))
+ && self
+ .look_ahead(2, |t| *t == token::OpenDelim(Delimiter::Brace) || t.is_whole_block())
&& !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
}
@@ -3013,8 +3004,9 @@ impl<'a> Parser<'a> {
fn is_try_block(&self) -> bool {
self.token.is_keyword(kw::Try)
- && self.look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Brace))
- && self.token.uninterpolated_span().rust_2018()
+ && self
+ .look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Brace) || t.is_whole_block())
+ && self.token.uninterpolated_span().at_least_rust_2018()
}
/// Parses an `async move? {...}` expression.
@@ -3032,10 +3024,14 @@ impl<'a> Parser<'a> {
&& ((
// `async move {`
self.is_keyword_ahead(1, &[kw::Move])
- && self.look_ahead(2, |t| *t == token::OpenDelim(Delimiter::Brace))
+ && self.look_ahead(2, |t| {
+ *t == token::OpenDelim(Delimiter::Brace) || t.is_whole_block()
+ })
) || (
// `async {`
- self.look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Brace))
+ self.look_ahead(1, |t| {
+ *t == token::OpenDelim(Delimiter::Brace) || t.is_whole_block()
+ })
))
}
@@ -3360,8 +3356,8 @@ impl<'a> Parser<'a> {
ExprKind::Binary(binop, lhs, rhs)
}
- fn mk_index(&self, expr: P<Expr>, idx: P<Expr>) -> ExprKind {
- ExprKind::Index(expr, idx)
+ fn mk_index(&self, expr: P<Expr>, idx: P<Expr>, brackets_span: Span) -> ExprKind {
+ ExprKind::Index(expr, idx, brackets_span)
}
fn mk_call(&self, f: P<Expr>, args: ThinVec<P<Expr>>) -> ExprKind {
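Editor's sketch, not part of the patch: the `t.is_whole_block()` additions to `is_do_catch_block`, `is_try_block`, and `is_async_block` let the body arrive as an interpolated `$b:block` macro fragment rather than a literal `{`. A minimal example of the `async` case, assuming a 2018+ edition crate built with a toolchain that includes this change:

    // The block reaches `is_async_block` as an interpolated `NtBlock`, which
    // the updated lookahead now accepts after `async`.
    macro_rules! make_future {
        ($b:block) => { async $b };
    }

    fn main() {
        let _fut = make_future!({ 1 + 1 });
    }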
diff --git a/compiler/rustc_parse/src/parser/generics.rs b/compiler/rustc_parse/src/parser/generics.rs
index 8ab38c4fb..242c9d332 100644
--- a/compiler/rustc_parse/src/parser/generics.rs
+++ b/compiler/rustc_parse/src/parser/generics.rs
@@ -49,7 +49,7 @@ impl<'a> Parser<'a> {
&& self.check_ident()
// `Const` followed by IDENT
{
- return Ok(self.recover_const_param_with_mistyped_const(preceding_attrs, ident)?);
+ return self.recover_const_param_with_mistyped_const(preceding_attrs, ident);
}
// Parse optional colon and param bounds.
diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs
index 1470180de..24c65d061 100644
--- a/compiler/rustc_parse/src/parser/item.rs
+++ b/compiler/rustc_parse/src/parser/item.rs
@@ -1,20 +1,20 @@
-use crate::errors;
-
use super::diagnostics::{dummy_arg, ConsumeClosingDelim};
use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
use super::{AttrWrapper, FollowedByType, ForceCollect, Parser, PathStyle, TrailingToken};
+use crate::errors::{self, MacroExpandsToAdtField};
+use crate::fluent_generated as fluent;
use ast::StaticItem;
use rustc_ast::ast::*;
use rustc_ast::ptr::P;
use rustc_ast::token::{self, Delimiter, TokenKind};
use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree};
use rustc_ast::util::case::Case;
+use rustc_ast::MacCall;
use rustc_ast::{self as ast, AttrVec, Attribute, DUMMY_NODE_ID};
use rustc_ast::{Async, Const, Defaultness, IsAuto, Mutability, Unsafe, UseTree, UseTreeKind};
use rustc_ast::{BindingAnnotation, Block, FnDecl, FnSig, Param, SelfKind};
use rustc_ast::{EnumDef, FieldDef, Generics, TraitRef, Ty, TyKind, Variant, VariantData};
use rustc_ast::{FnHeader, ForeignItem, Path, PathSegment, Visibility, VisibilityKind};
-use rustc_ast::{MacCall, MacDelimiter};
use rustc_ast_pretty::pprust;
use rustc_errors::{
struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, IntoDiagnostic, PResult,
@@ -226,9 +226,9 @@ impl<'a> Parser<'a> {
} else if self.is_static_global() {
// STATIC ITEM
self.bump(); // `static`
- let m = self.parse_mutability();
- let (ident, ty, expr) = self.parse_item_global(Some(m))?;
- (ident, ItemKind::Static(Box::new(StaticItem { ty, mutability: m, expr })))
+ let mutability = self.parse_mutability();
+ let (ident, item) = self.parse_static_item(mutability)?;
+ (ident, ItemKind::Static(Box::new(item)))
} else if let Const::Yes(const_span) = self.parse_constness(Case::Sensitive) {
// CONST ITEM
if self.token.is_keyword(kw::Impl) {
@@ -236,8 +236,16 @@ impl<'a> Parser<'a> {
self.recover_const_impl(const_span, attrs, def_())?
} else {
self.recover_const_mut(const_span);
- let (ident, ty, expr) = self.parse_item_global(None)?;
- (ident, ItemKind::Const(Box::new(ConstItem { defaultness: def_(), ty, expr })))
+ let (ident, generics, ty, expr) = self.parse_const_item()?;
+ (
+ ident,
+ ItemKind::Const(Box::new(ConstItem {
+ defaultness: def_(),
+ generics,
+ ty,
+ expr,
+ })),
+ )
}
} else if self.check_keyword(kw::Trait) || self.check_auto_or_unsafe_trait_item() {
// TRAIT ITEM
@@ -878,6 +886,7 @@ impl<'a> Parser<'a> {
self.sess.emit_err(errors::AssociatedStaticItemNotAllowed { span });
AssocItemKind::Const(Box::new(ConstItem {
defaultness: Defaultness::Final,
+ generics: Generics::default(),
ty,
expr,
}))
@@ -892,7 +901,7 @@ impl<'a> Parser<'a> {
/// Parses a `type` alias with the following grammar:
/// ```ebnf
- /// TypeAlias = "type" Ident Generics {":" GenericBounds}? {"=" Ty}? ";" ;
+ /// TypeAlias = "type" Ident Generics (":" GenericBounds)? WhereClause ("=" Ty)? WhereClause ";" ;
/// ```
/// The `"type"` has already been eaten.
fn parse_type_alias(&mut self, defaultness: Defaultness) -> PResult<'a, ItemInfo> {
@@ -1220,33 +1229,132 @@ impl<'a> Parser<'a> {
Ok(impl_info)
}
- /// Parse `["const" | ("static" "mut"?)] $ident ":" $ty (= $expr)?` with
- /// `["const" | ("static" "mut"?)]` already parsed and stored in `m`.
+ /// Parse a static item with the prefix `"static" "mut"?` already parsed and stored in `mutability`.
///
- /// When `m` is `"const"`, `$ident` may also be `"_"`.
- fn parse_item_global(
- &mut self,
- m: Option<Mutability>,
- ) -> PResult<'a, (Ident, P<Ty>, Option<P<ast::Expr>>)> {
- let id = if m.is_none() { self.parse_ident_or_underscore() } else { self.parse_ident() }?;
+ /// ```ebnf
+ /// Static = "static" "mut"? $ident ":" $ty (= $expr)? ";" ;
+ /// ```
+ fn parse_static_item(&mut self, mutability: Mutability) -> PResult<'a, (Ident, StaticItem)> {
+ let ident = self.parse_ident()?;
- // Parse the type of a `const` or `static mut?` item.
- // That is, the `":" $ty` fragment.
+ if self.token.kind == TokenKind::Lt && self.may_recover() {
+ let generics = self.parse_generics()?;
+ self.sess.emit_err(errors::StaticWithGenerics { span: generics.span });
+ }
+
+ // Parse the type of a static item. That is, the `":" $ty` fragment.
+ // FIXME: This could maybe benefit from `.may_recover()`?
let ty = match (self.eat(&token::Colon), self.check(&token::Eq) | self.check(&token::Semi))
{
- // If there wasn't a `:` or the colon was followed by a `=` or `;` recover a missing type.
(true, false) => self.parse_ty()?,
- (colon, _) => self.recover_missing_const_type(colon, m),
+ // If there wasn't a `:` or the colon was followed by a `=` or `;`, recover a missing type.
+ (colon, _) => self.recover_missing_global_item_type(colon, Some(mutability)),
};
let expr = if self.eat(&token::Eq) { Some(self.parse_expr()?) } else { None };
+
+ self.expect_semi()?;
+
+ Ok((ident, StaticItem { ty, mutability, expr }))
+ }
+
+ /// Parse a constant item with the prefix `"const"` already parsed.
+ ///
+ /// ```ebnf
+ /// Const = "const" ($ident | "_") Generics ":" $ty (= $expr)? WhereClause ";" ;
+ /// ```
+ fn parse_const_item(&mut self) -> PResult<'a, (Ident, Generics, P<Ty>, Option<P<ast::Expr>>)> {
+ let ident = self.parse_ident_or_underscore()?;
+
+ let mut generics = self.parse_generics()?;
+
+ // Check the span for emptiness instead of the list of parameters in order to correctly
+ // recognize and subsequently flag empty parameter lists (`<>`) as unstable.
+ if !generics.span.is_empty() {
+ self.sess.gated_spans.gate(sym::generic_const_items, generics.span);
+ }
+
+ // Parse the type of a constant item. That is, the `":" $ty` fragment.
+ // FIXME: This could maybe benefit from `.may_recover()`?
+ let ty = match (
+ self.eat(&token::Colon),
+ self.check(&token::Eq) | self.check(&token::Semi) | self.check_keyword(kw::Where),
+ ) {
+ (true, false) => self.parse_ty()?,
+ // If there wasn't a `:` or the colon was followed by a `=`, `;` or `where`, recover a missing type.
+ (colon, _) => self.recover_missing_global_item_type(colon, None),
+ };
+
+ // Proactively parse a where-clause to be able to provide a good error message in case we
+ // encounter the item body following it.
+ let before_where_clause =
+ if self.may_recover() { self.parse_where_clause()? } else { WhereClause::default() };
+
+ let expr = if self.eat(&token::Eq) { Some(self.parse_expr()?) } else { None };
+
+ let after_where_clause = self.parse_where_clause()?;
+
+ // Provide a nice error message if the user placed a where-clause before the item body.
+ // Users may be tempted to write such code if they are still used to the deprecated
+ // where-clause location on type aliases and associated types. See also #89122.
+ if before_where_clause.has_where_token && let Some(expr) = &expr {
+ self.sess.emit_err(errors::WhereClauseBeforeConstBody {
+ span: before_where_clause.span,
+ name: ident.span,
+ body: expr.span,
+ sugg: if !after_where_clause.has_where_token {
+ self.sess.source_map().span_to_snippet(expr.span).ok().map(|body| {
+ errors::WhereClauseBeforeConstBodySugg {
+ left: before_where_clause.span.shrink_to_lo(),
+ snippet: body,
+ right: before_where_clause.span.shrink_to_hi().to(expr.span),
+ }
+ })
+ } else {
+ // FIXME(generic_const_items): Provide a structured suggestion to merge the first
+ // where-clause into the second one.
+ None
+ },
+ });
+ }
+
+ // Merge the predicates of both where-clauses since either one can be relevant.
+ // If we didn't parse a body (which is valid for associated consts in traits) and we were
+ // allowed to recover, `before_where_clause` contains the predicates, otherwise they are
+ // in `after_where_clause`. Further, both of them might contain predicates iff two
+ // where-clauses were provided which is syntactically ill-formed but we want to recover from
+ // it and treat them as one large where-clause.
+ let mut predicates = before_where_clause.predicates;
+ predicates.extend(after_where_clause.predicates);
+ let where_clause = WhereClause {
+ has_where_token: before_where_clause.has_where_token
+ || after_where_clause.has_where_token,
+ predicates,
+ span: if after_where_clause.has_where_token {
+ after_where_clause.span
+ } else {
+ before_where_clause.span
+ },
+ };
+
+ if where_clause.has_where_token {
+ self.sess.gated_spans.gate(sym::generic_const_items, where_clause.span);
+ }
+
+ generics.where_clause = where_clause;
+
self.expect_semi()?;
- Ok((id, ty, expr))
+
+ Ok((ident, generics, ty, expr))
}
/// We were supposed to parse `":" $ty` but the `:` or the type was missing.
/// This means that the type is missing.
- fn recover_missing_const_type(&mut self, colon_present: bool, m: Option<Mutability>) -> P<Ty> {
+ fn recover_missing_global_item_type(
+ &mut self,
+ colon_present: bool,
+ m: Option<Mutability>,
+ ) -> P<Ty> {
// Construct the error and stash it away with the hope
// that typeck will later enrich the error with a type.
let kind = match m {
@@ -1342,6 +1450,17 @@ impl<'a> Parser<'a> {
}
let ident = this.parse_field_ident("enum", vlo)?;
+ if this.token == token::Not {
+ if let Err(mut err) = this.unexpected::<()>() {
+ err.note(fluent::parse_macro_expands_to_enum_variant).emit();
+ }
+
+ this.bump();
+ this.parse_delim_args()?;
+
+ return Ok((None, TrailingToken::MaybeComma));
+ }
+
let struct_def = if this.check(&token::OpenDelim(Delimiter::Brace)) {
// Parse a struct variant.
let (fields, recovered) =
@@ -1369,7 +1488,7 @@ impl<'a> Parser<'a> {
Ok((Some(vr), TrailingToken::MaybeComma))
},
- ).map_err(|mut err|{
+ ).map_err(|mut err| {
err.help("enum variants can be `Variant`, `Variant = <integer>`, `Variant(Type, ..., TypeN)` or `Variant { fields: Types }`");
err
})
@@ -1579,7 +1698,8 @@ impl<'a> Parser<'a> {
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let lo = this.token.span;
let vis = this.parse_visibility(FollowedByType::No)?;
- Ok((this.parse_single_struct_field(adt_ty, lo, vis, attrs)?, TrailingToken::None))
+ this.parse_single_struct_field(adt_ty, lo, vis, attrs)
+ .map(|field| (field, TrailingToken::None))
})
}
@@ -1713,8 +1833,8 @@ impl<'a> Parser<'a> {
"field names and their types are separated with `:`",
":",
Applicability::MachineApplicable,
- );
- err.emit();
+ )
+ .emit();
} else {
return Err(err);
}
@@ -1731,6 +1851,23 @@ impl<'a> Parser<'a> {
attrs: AttrVec,
) -> PResult<'a, FieldDef> {
let name = self.parse_field_ident(adt_ty, lo)?;
+ // Parse the macro invocation and recover
+ if self.token.kind == token::Not {
+ if let Err(mut err) = self.unexpected::<FieldDef>() {
+ err.subdiagnostic(MacroExpandsToAdtField { adt_ty }).emit();
+ self.bump();
+ self.parse_delim_args()?;
+ return Ok(FieldDef {
+ span: DUMMY_SP,
+ ident: None,
+ vis,
+ id: DUMMY_NODE_ID,
+ ty: self.mk_ty(DUMMY_SP, TyKind::Err),
+ attrs,
+ is_placeholder: false,
+ });
+ }
+ }
self.expect_field_ty_separator()?;
let ty = self.parse_ty()?;
if self.token.kind == token::Colon && self.look_ahead(1, |tok| tok.kind != token::Colon) {
@@ -1860,7 +1997,7 @@ impl<'a> Parser<'a> {
let arrow = TokenTree::token_alone(token::FatArrow, pspan.between(bspan)); // `=>`
let tokens = TokenStream::new(vec![params, arrow, body]);
let dspan = DelimSpan::from_pair(pspan.shrink_to_lo(), bspan.shrink_to_hi());
- P(DelimArgs { dspan, delim: MacDelimiter::Brace, tokens })
+ P(DelimArgs { dspan, delim: Delimiter::Brace, tokens })
} else {
return self.unexpected();
};
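For illustration only (not in the patch): `parse_const_item` now accepts generics and where-clauses on `const` items, gated behind `generic_const_items`. A nightly-only sketch of the syntax it starts to parse; the feature is incomplete, so semantics may still change:

    #![feature(generic_const_items)]
    #![allow(incomplete_features)]

    const NONE<T>: Option<T> = None
    where
        T: Copy;

    fn main() {
        let _x: Option<u8> = NONE::<u8>;
    }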
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
index c23420661..77c59bb38 100644
--- a/compiler/rustc_parse/src/parser/mod.rs
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -24,12 +24,11 @@ use rustc_ast::tokenstream::{TokenStream, TokenTree, TokenTreeCursor};
use rustc_ast::util::case::Case;
use rustc_ast::AttrId;
use rustc_ast::DUMMY_NODE_ID;
-use rustc_ast::{self as ast, AnonConst, AttrStyle, Const, DelimArgs, Extern};
-use rustc_ast::{Async, AttrArgs, AttrArgsEq, Expr, ExprKind, MacDelimiter, Mutability, StrLit};
+use rustc_ast::{self as ast, AnonConst, Const, DelimArgs, Extern};
+use rustc_ast::{Async, AttrArgs, AttrArgsEq, Expr, ExprKind, Mutability, StrLit};
use rustc_ast::{HasAttrs, HasTokens, Unsafe, Visibility, VisibilityKind};
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sync::Ordering;
use rustc_errors::PResult;
use rustc_errors::{
Applicability, DiagnosticBuilder, ErrorGuaranteed, FatalError, IntoDiagnostic, MultiSpan,
@@ -38,7 +37,7 @@ use rustc_session::parse::ParseSess;
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use std::ops::Range;
-use std::{cmp, mem, slice};
+use std::{mem, slice};
use thin_vec::ThinVec;
use tracing::debug;
@@ -135,10 +134,24 @@ pub struct Parser<'a> {
pub capture_cfg: bool,
restrictions: Restrictions,
expected_tokens: Vec<TokenType>,
- // Important: This must only be advanced from `bump` to ensure that
- // `token_cursor.num_next_calls` is updated properly.
token_cursor: TokenCursor,
- desugar_doc_comments: bool,
+ // The number of calls to `bump`, i.e. the position in the token stream.
+ num_bump_calls: usize,
+ // During parsing we may sometimes need to 'unglue' a glued token into two
+    // component tokens (e.g. '>>' into '>' and '>'), so the parser can consume
+    // them one at a time. This process bypasses the normal capturing mechanism
+    // (e.g. `num_bump_calls` will not be incremented), since the 'unglued'
+    // tokens do not exist in the original `TokenStream`.
+ //
+ // If we end up consuming both unglued tokens, this is not an issue. We'll
+ // end up capturing the single 'glued' token.
+ //
+ // However, sometimes we may want to capture just the first 'unglued'
+ // token. For example, capturing the `Vec<u8>` in `Option<Vec<u8>>`
+ // requires us to unglue the trailing `>>` token. The `break_last_token`
+ // field is used to track this token. It gets appended to the captured
+ // stream when we evaluate a `LazyAttrTokenStream`.
+ break_last_token: bool,
/// This field is used to keep track of how many left angle brackets we have seen. This is
/// required in order to detect extra leading left angle brackets (`<` characters) and error
/// appropriately.
@@ -162,7 +175,7 @@ pub struct Parser<'a> {
// This type is used a lot, e.g. it's cloned when matching many declarative macro rules with nonterminals. Make sure
// it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(Parser<'_>, 272);
+rustc_data_structures::static_assert_size!(Parser<'_>, 264);
/// Stores span information about a closure.
#[derive(Clone)]
@@ -224,64 +237,29 @@ struct TokenCursor {
// tokens are in `stack[n-1]`. `stack[0]` (when present) has no delimiters
// because it's the outermost token stream which never has delimiters.
stack: Vec<(TokenTreeCursor, Delimiter, DelimSpan)>,
-
- desugar_doc_comments: bool,
-
- // Counts the number of calls to `{,inlined_}next`.
- num_next_calls: usize,
-
- // During parsing, we may sometimes need to 'unglue' a
- // glued token into two component tokens
- // (e.g. '>>' into '>' and '>), so that the parser
- // can consume them one at a time. This process
- // bypasses the normal capturing mechanism
- // (e.g. `num_next_calls` will not be incremented),
- // since the 'unglued' tokens due not exist in
- // the original `TokenStream`.
- //
- // If we end up consuming both unglued tokens,
- // then this is not an issue - we'll end up
- // capturing the single 'glued' token.
- //
- // However, in certain circumstances, we may
- // want to capture just the first 'unglued' token.
- // For example, capturing the `Vec<u8>`
- // in `Option<Vec<u8>>` requires us to unglue
- // the trailing `>>` token. The `break_last_token`
- // field is used to track this token - it gets
- // appended to the captured stream when
- // we evaluate a `LazyAttrTokenStream`.
- break_last_token: bool,
}
impl TokenCursor {
- fn next(&mut self, desugar_doc_comments: bool) -> (Token, Spacing) {
- self.inlined_next(desugar_doc_comments)
+ fn next(&mut self) -> (Token, Spacing) {
+ self.inlined_next()
}
/// This always-inlined version should only be used on hot code paths.
#[inline(always)]
- fn inlined_next(&mut self, desugar_doc_comments: bool) -> (Token, Spacing) {
+ fn inlined_next(&mut self) -> (Token, Spacing) {
loop {
- // FIXME: we currently don't return `Delimiter` open/close delims. To fix #67062 we will
- // need to, whereupon the `delim != Delimiter::Invisible` conditions below can be
- // removed.
+ // FIXME: we currently don't return `Delimiter::Invisible` open/close delims. To fix
+ // #67062 we will need to, whereupon the `delim != Delimiter::Invisible` conditions
+ // below can be removed.
if let Some(tree) = self.tree_cursor.next_ref() {
match tree {
- &TokenTree::Token(ref token, spacing) => match (desugar_doc_comments, token) {
- (true, &Token { kind: token::DocComment(_, attr_style, data), span }) => {
- let desugared = self.desugar(attr_style, data, span);
- self.tree_cursor.replace_prev_and_rewind(desugared);
- // Continue to get the first token of the desugared doc comment.
- }
- _ => {
- debug_assert!(!matches!(
- token.kind,
- token::OpenDelim(_) | token::CloseDelim(_)
- ));
- return (token.clone(), spacing);
- }
- },
+ &TokenTree::Token(ref token, spacing) => {
+ debug_assert!(!matches!(
+ token.kind,
+ token::OpenDelim(_) | token::CloseDelim(_)
+ ));
+ return (token.clone(), spacing);
+ }
&TokenTree::Delimited(sp, delim, ref tts) => {
let trees = tts.clone().into_trees();
self.stack.push((mem::replace(&mut self.tree_cursor, trees), delim, sp));
@@ -304,52 +282,6 @@ impl TokenCursor {
}
}
}
-
- // Desugar a doc comment into something like `#[doc = r"foo"]`.
- fn desugar(&mut self, attr_style: AttrStyle, data: Symbol, span: Span) -> Vec<TokenTree> {
- // Searches for the occurrences of `"#*` and returns the minimum number of `#`s
- // required to wrap the text. E.g.
- // - `abc d` is wrapped as `r"abc d"` (num_of_hashes = 0)
- // - `abc "d"` is wrapped as `r#"abc "d""#` (num_of_hashes = 1)
- // - `abc "##d##"` is wrapped as `r###"abc ##"d"##"###` (num_of_hashes = 3)
- let mut num_of_hashes = 0;
- let mut count = 0;
- for ch in data.as_str().chars() {
- count = match ch {
- '"' => 1,
- '#' if count > 0 => count + 1,
- _ => 0,
- };
- num_of_hashes = cmp::max(num_of_hashes, count);
- }
-
- // `/// foo` becomes `doc = r"foo"`.
- let delim_span = DelimSpan::from_single(span);
- let body = TokenTree::Delimited(
- delim_span,
- Delimiter::Bracket,
- [
- TokenTree::token_alone(token::Ident(sym::doc, false), span),
- TokenTree::token_alone(token::Eq, span),
- TokenTree::token_alone(
- TokenKind::lit(token::StrRaw(num_of_hashes), data, None),
- span,
- ),
- ]
- .into_iter()
- .collect::<TokenStream>(),
- );
-
- if attr_style == AttrStyle::Inner {
- vec![
- TokenTree::token_alone(token::Pound, span),
- TokenTree::token_alone(token::Not, span),
- body,
- ]
- } else {
- vec![TokenTree::token_alone(token::Pound, span), body]
- }
- }
}
#[derive(Debug, Clone, PartialEq)]
@@ -368,7 +300,7 @@ impl TokenType {
fn to_string(&self) -> String {
match self {
TokenType::Token(t) => format!("`{}`", pprust::token_kind_to_string(t)),
- TokenType::Keyword(kw) => format!("`{}`", kw),
+ TokenType::Keyword(kw) => format!("`{kw}`"),
TokenType::Operator => "an operator".to_string(),
TokenType::Lifetime => "lifetime".to_string(),
TokenType::Ident => "identifier".to_string(),
@@ -438,14 +370,13 @@ pub(super) fn token_descr(token: &Token) -> String {
TokenDescription::DocComment => "doc comment",
});
- if let Some(kind) = kind { format!("{} `{}`", kind, name) } else { format!("`{}`", name) }
+ if let Some(kind) = kind { format!("{kind} `{name}`") } else { format!("`{name}`") }
}
impl<'a> Parser<'a> {
pub fn new(
sess: &'a ParseSess,
- tokens: TokenStream,
- desugar_doc_comments: bool,
+ stream: TokenStream,
subparser_name: Option<&'static str>,
) -> Self {
let mut parser = Parser {
@@ -456,14 +387,9 @@ impl<'a> Parser<'a> {
capture_cfg: false,
restrictions: Restrictions::empty(),
expected_tokens: Vec::new(),
- token_cursor: TokenCursor {
- tree_cursor: tokens.into_trees(),
- stack: Vec::new(),
- num_next_calls: 0,
- desugar_doc_comments,
- break_last_token: false,
- },
- desugar_doc_comments,
+ token_cursor: TokenCursor { tree_cursor: stream.into_trees(), stack: Vec::new() },
+ num_bump_calls: 0,
+ break_last_token: false,
unmatched_angle_bracket_count: 0,
max_angle_bracket_count: 0,
last_unexpected_token_span: None,
@@ -766,7 +692,7 @@ impl<'a> Parser<'a> {
// If we consume any additional tokens, then this token
// is not needed (we'll capture the entire 'glued' token),
// and `bump` will set this field to `None`
- self.token_cursor.break_last_token = true;
+ self.break_last_token = true;
// Use the spacing of the glued token as the spacing
// of the unglued second token.
self.bump_with((Token::new(second, second_span), self.token_spacing));
@@ -923,7 +849,7 @@ impl<'a> Parser<'a> {
expect_err
.span_suggestion_short(
sp,
- format!("missing `{}`", token_str),
+ format!("missing `{token_str}`"),
token_str,
Applicability::MaybeIncorrect,
)
@@ -1107,12 +1033,12 @@ impl<'a> Parser<'a> {
pub fn bump(&mut self) {
// Note: destructuring here would give nicer code, but it was found in #96210 to be slower
// than `.0`/`.1` access.
- let mut next = self.token_cursor.inlined_next(self.desugar_doc_comments);
- self.token_cursor.num_next_calls += 1;
+ let mut next = self.token_cursor.inlined_next();
+ self.num_bump_calls += 1;
    // We've retrieved a token from the underlying
// cursor, so we no longer need to worry about
// an unglued token. See `break_and_eat` for more details
- self.token_cursor.break_last_token = false;
+ self.break_last_token = false;
if next.0.span.is_dummy() {
// Tweak the location for better diagnostics, but keep syntactic context intact.
let fallback_span = self.token.span;
@@ -1126,38 +1052,53 @@ impl<'a> Parser<'a> {
}
/// Look-ahead `dist` tokens of `self.token` and get access to that token there.
- /// When `dist == 0` then the current token is looked at.
+ /// When `dist == 0` then the current token is looked at. `Eof` will be
+ /// returned if the look-ahead is any distance past the end of the tokens.
pub fn look_ahead<R>(&self, dist: usize, looker: impl FnOnce(&Token) -> R) -> R {
if dist == 0 {
return looker(&self.token);
}
- let tree_cursor = &self.token_cursor.tree_cursor;
if let Some(&(_, delim, span)) = self.token_cursor.stack.last()
&& delim != Delimiter::Invisible
{
+ // We are not in the outermost token stream, and the token stream
+ // we are in has non-skipped delimiters. Look for skipped
+ // delimiters in the lookahead range.
+ let tree_cursor = &self.token_cursor.tree_cursor;
let all_normal = (0..dist).all(|i| {
let token = tree_cursor.look_ahead(i);
!matches!(token, Some(TokenTree::Delimited(_, Delimiter::Invisible, _)))
});
if all_normal {
+ // There were no skipped delimiters. Do lookahead by plain indexing.
return match tree_cursor.look_ahead(dist - 1) {
- Some(tree) => match tree {
- TokenTree::Token(token, _) => looker(token),
- TokenTree::Delimited(dspan, delim, _) => {
- looker(&Token::new(token::OpenDelim(*delim), dspan.open))
+ Some(tree) => {
+ // Indexing stayed within the current token stream.
+ match tree {
+ TokenTree::Token(token, _) => looker(token),
+ TokenTree::Delimited(dspan, delim, _) => {
+ looker(&Token::new(token::OpenDelim(*delim), dspan.open))
+ }
}
- },
- None => looker(&Token::new(token::CloseDelim(delim), span.close)),
+ }
+ None => {
+ // Indexing went past the end of the current token
+ // stream. Use the close delimiter, no matter how far
+ // ahead `dist` went.
+ looker(&Token::new(token::CloseDelim(delim), span.close))
+ }
};
}
}
+ // We are in a more complex case. Just clone the token cursor and use
+ // `next`, skipping delimiters as necessary. Slow but simple.
let mut cursor = self.token_cursor.clone();
let mut i = 0;
let mut token = Token::dummy();
while i < dist {
- token = cursor.next(/* desugar_doc_comments */ false).0;
+ token = cursor.next().0;
if matches!(
token.kind,
token::OpenDelim(Delimiter::Invisible) | token::CloseDelim(Delimiter::Invisible)
@@ -1166,7 +1107,7 @@ impl<'a> Parser<'a> {
}
i += 1;
}
- return looker(&token);
+ looker(&token)
}
/// Returns whether any of the given keywords are `dist` tokens ahead of the current one.
@@ -1210,7 +1151,8 @@ impl<'a> Parser<'a> {
fn parse_constness_(&mut self, case: Case, is_closure: bool) -> Const {
// Avoid const blocks and const closures to be parsed as const items
if (self.check_const_closure() == is_closure)
- && self.look_ahead(1, |t| t != &token::OpenDelim(Delimiter::Brace))
+ && !self
+ .look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Brace) || t.is_whole_block())
&& self.eat_keyword_case(kw::Const, case)
{
Const::Yes(self.prev_token.uninterpolated_span())
@@ -1288,10 +1230,10 @@ impl<'a> Parser<'a> {
|| self.check(&token::OpenDelim(Delimiter::Brace));
delimited.then(|| {
- // We've confirmed above that there is a delimiter so unwrapping is OK.
- let TokenTree::Delimited(dspan, delim, tokens) = self.parse_token_tree() else { unreachable!() };
-
- DelimArgs { dspan, delim: MacDelimiter::from_token(delim).unwrap(), tokens }
+ let TokenTree::Delimited(dspan, delim, tokens) = self.parse_token_tree() else {
+ unreachable!()
+ };
+ DelimArgs { dspan, delim, tokens }
})
}
@@ -1307,12 +1249,11 @@ impl<'a> Parser<'a> {
}
/// Parses a single token tree from the input.
- pub(crate) fn parse_token_tree(&mut self) -> TokenTree {
+ pub fn parse_token_tree(&mut self) -> TokenTree {
match self.token.kind {
token::OpenDelim(..) => {
// Grab the tokens within the delimiters.
- let tree_cursor = &self.token_cursor.tree_cursor;
- let stream = tree_cursor.stream.clone();
+ let stream = self.token_cursor.tree_cursor.stream.clone();
let (_, delim, span) = *self.token_cursor.stack.last().unwrap();
// Advance the token cursor through the entire delimited
@@ -1343,15 +1284,6 @@ impl<'a> Parser<'a> {
}
}
- /// Parses a stream of tokens into a list of `TokenTree`s, up to EOF.
- pub fn parse_all_token_trees(&mut self) -> PResult<'a, Vec<TokenTree>> {
- let mut tts = Vec::new();
- while self.token != token::Eof {
- tts.push(self.parse_token_tree());
- }
- Ok(tts)
- }
-
pub fn parse_tokens(&mut self) -> TokenStream {
let mut result = Vec::new();
loop {
@@ -1511,7 +1443,7 @@ impl<'a> Parser<'a> {
}
pub fn approx_token_stream_pos(&self) -> usize {
- self.token_cursor.num_next_calls
+ self.num_bump_calls
}
}
@@ -1537,18 +1469,6 @@ pub(crate) fn make_unclosed_delims_error(
Some(err)
}
-pub fn emit_unclosed_delims(unclosed_delims: &mut Vec<UnmatchedDelim>, sess: &ParseSess) {
- let _ = sess.reached_eof.fetch_or(
- unclosed_delims.iter().any(|unmatched_delim| unmatched_delim.found_delim.is_none()),
- Ordering::Relaxed,
- );
- for unmatched in unclosed_delims.drain(..) {
- if let Some(mut e) = make_unclosed_delims_error(unmatched, sess) {
- e.emit();
- }
- }
-}
-
/// A helper struct used when building an `AttrTokenStream` from
/// a `LazyAttrTokenStream`. Both delimiter and non-delimited tokens
/// are stored as `FlatToken::Token`. A vector of `FlatToken`s
@@ -1571,7 +1491,7 @@ pub enum FlatToken {
}
#[derive(Debug)]
-pub enum NtOrTt {
+pub enum ParseNtResult {
Nt(Nonterminal),
Tt(TokenTree),
}
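Editor's sketch, not part of the patch: the relocated `break_last_token` flag covers exactly the case the new field comment describes, where the trailing `>>` of a captured type must be split so only the first `>` belongs to the capture. A small, hypothetical macro that triggers it:

    // Matching `$t:ty` against `Option<Vec<u8>>` makes the parser split the
    // final `>>`; `break_last_token` records that only the first `>` belongs
    // to the tokens captured so far.
    macro_rules! phantom_of {
        ($t:ty) => { core::marker::PhantomData::<$t> };
    }

    fn main() {
        let _marker = phantom_of!(Option<Vec<u8>>);
    }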
diff --git a/compiler/rustc_parse/src/parser/nonterminal.rs b/compiler/rustc_parse/src/parser/nonterminal.rs
index adb0d372a..ff059a7e8 100644
--- a/compiler/rustc_parse/src/parser/nonterminal.rs
+++ b/compiler/rustc_parse/src/parser/nonterminal.rs
@@ -1,5 +1,5 @@
use rustc_ast::ptr::P;
-use rustc_ast::token::{self, Delimiter, NonterminalKind, Token};
+use rustc_ast::token::{self, Delimiter, Nonterminal::*, NonterminalKind, Token};
use rustc_ast::HasTokens;
use rustc_ast_pretty::pprust;
use rustc_errors::IntoDiagnostic;
@@ -8,7 +8,7 @@ use rustc_span::symbol::{kw, Ident};
use crate::errors::UnexpectedNonterminal;
use crate::parser::pat::{CommaRecoveryMode, RecoverColon, RecoverComma};
-use crate::parser::{FollowedByType, ForceCollect, NtOrTt, Parser, PathStyle};
+use crate::parser::{FollowedByType, ForceCollect, ParseNtResult, Parser, PathStyle};
impl<'a> Parser<'a> {
/// Checks whether a non-terminal may begin with a particular token.
@@ -20,10 +20,21 @@ impl<'a> Parser<'a> {
pub fn nonterminal_may_begin_with(kind: NonterminalKind, token: &Token) -> bool {
/// Checks whether the non-terminal may contain a single (non-keyword) identifier.
fn may_be_ident(nt: &token::Nonterminal) -> bool {
- !matches!(
- *nt,
- token::NtItem(_) | token::NtBlock(_) | token::NtVis(_) | token::NtLifetime(_)
- )
+ match nt {
+ NtStmt(_)
+ | NtPat(_)
+ | NtExpr(_)
+ | NtTy(_)
+ | NtIdent(..)
+ | NtLiteral(_) // `true`, `false`
+ | NtMeta(_)
+ | NtPath(_) => true,
+
+ NtItem(_)
+ | NtBlock(_)
+ | NtVis(_)
+ | NtLifetime(_) => false,
+ }
}
match kind {
@@ -44,27 +55,19 @@ impl<'a> Parser<'a> {
},
NonterminalKind::Block => match &token.kind {
token::OpenDelim(Delimiter::Brace) => true,
- token::Interpolated(nt) => !matches!(
- **nt,
- token::NtItem(_)
- | token::NtPat(_)
- | token::NtTy(_)
- | token::NtIdent(..)
- | token::NtMeta(_)
- | token::NtPath(_)
- | token::NtVis(_)
- ),
+ token::Interpolated(nt) => match **nt {
+ NtBlock(_) | NtLifetime(_) | NtStmt(_) | NtExpr(_) | NtLiteral(_) => true,
+ NtItem(_) | NtPat(_) | NtTy(_) | NtIdent(..) | NtMeta(_) | NtPath(_)
+ | NtVis(_) => false,
+ },
_ => false,
},
NonterminalKind::Path | NonterminalKind::Meta => match &token.kind {
token::ModSep | token::Ident(..) => true,
- token::Interpolated(nt) => match **nt {
- token::NtPath(_) | token::NtMeta(_) => true,
- _ => may_be_ident(&nt),
- },
+ token::Interpolated(nt) => may_be_ident(nt),
_ => false,
},
- NonterminalKind::PatParam { .. } | NonterminalKind::PatWithOr { .. } => {
+ NonterminalKind::PatParam { .. } | NonterminalKind::PatWithOr => {
match &token.kind {
token::Ident(..) | // box, ref, mut, and other identifiers (can stricten)
token::OpenDelim(Delimiter::Parenthesis) | // tuple pattern
@@ -79,7 +82,7 @@ impl<'a> Parser<'a> {
token::Lt | // path (UFCS constant)
token::BinOp(token::Shl) => true, // path (double UFCS)
// leading vert `|` or-pattern
- token::BinOp(token::Or) => matches!(kind, NonterminalKind::PatWithOr {..}),
+ token::BinOp(token::Or) => matches!(kind, NonterminalKind::PatWithOr),
token::Interpolated(nt) => may_be_ident(nt),
_ => false,
}
@@ -87,7 +90,7 @@ impl<'a> Parser<'a> {
NonterminalKind::Lifetime => match &token.kind {
token::Lifetime(_) => true,
token::Interpolated(nt) => {
- matches!(**nt, token::NtLifetime(_))
+ matches!(**nt, NtLifetime(_))
}
_ => false,
},
@@ -100,18 +103,16 @@ impl<'a> Parser<'a> {
/// Parse a non-terminal (e.g. MBE `:pat` or `:ident`). Inlined because there is only one call
/// site.
#[inline]
- pub fn parse_nonterminal(&mut self, kind: NonterminalKind) -> PResult<'a, NtOrTt> {
- // Any `Nonterminal` which stores its tokens (currently `NtItem` and `NtExpr`)
- // needs to have them force-captured here.
+ pub fn parse_nonterminal(&mut self, kind: NonterminalKind) -> PResult<'a, ParseNtResult> {
// A `macro_rules!` invocation may pass a captured item/expr to a proc-macro,
// which requires having captured tokens available. Since we cannot determine
// in advance whether or not a proc-macro will be (transitively) invoked,
// we always capture tokens for any `Nonterminal` which needs them.
let mut nt = match kind {
// Note that TT is treated differently to all the others.
- NonterminalKind::TT => return Ok(NtOrTt::Tt(self.parse_token_tree())),
+ NonterminalKind::TT => return Ok(ParseNtResult::Tt(self.parse_token_tree())),
NonterminalKind::Item => match self.parse_item(ForceCollect::Yes)? {
- Some(item) => token::NtItem(item),
+ Some(item) => NtItem(item),
None => {
return Err(UnexpectedNonterminal::Item(self.token.span)
.into_diagnostic(&self.sess.span_diagnostic));
@@ -120,19 +121,19 @@ impl<'a> Parser<'a> {
NonterminalKind::Block => {
// While a block *expression* may have attributes (e.g. `#[my_attr] { ... }`),
// the ':block' matcher does not support them
- token::NtBlock(self.collect_tokens_no_attrs(|this| this.parse_block())?)
+ NtBlock(self.collect_tokens_no_attrs(|this| this.parse_block())?)
}
NonterminalKind::Stmt => match self.parse_stmt(ForceCollect::Yes)? {
- Some(s) => token::NtStmt(P(s)),
+ Some(s) => NtStmt(P(s)),
None => {
return Err(UnexpectedNonterminal::Statement(self.token.span)
.into_diagnostic(&self.sess.span_diagnostic));
}
},
- NonterminalKind::PatParam { .. } | NonterminalKind::PatWithOr { .. } => {
- token::NtPat(self.collect_tokens_no_attrs(|this| match kind {
- NonterminalKind::PatParam { .. } => this.parse_pat_no_top_alt(None),
- NonterminalKind::PatWithOr { .. } => this.parse_pat_allow_top_alt(
+ NonterminalKind::PatParam { .. } | NonterminalKind::PatWithOr => {
+ NtPat(self.collect_tokens_no_attrs(|this| match kind {
+ NonterminalKind::PatParam { .. } => this.parse_pat_no_top_alt(None, None),
+ NonterminalKind::PatWithOr => this.parse_pat_allow_top_alt(
None,
RecoverComma::No,
RecoverColon::No,
@@ -142,16 +143,16 @@ impl<'a> Parser<'a> {
})?)
}
- NonterminalKind::Expr => token::NtExpr(self.parse_expr_force_collect()?),
+ NonterminalKind::Expr => NtExpr(self.parse_expr_force_collect()?),
NonterminalKind::Literal => {
// The `:literal` matcher does not support attributes
- token::NtLiteral(
+ NtLiteral(
self.collect_tokens_no_attrs(|this| this.parse_literal_maybe_minus())?,
)
}
- NonterminalKind::Ty => token::NtTy(
- self.collect_tokens_no_attrs(|this| this.parse_no_question_mark_recover())?,
+ NonterminalKind::Ty => NtTy(
+ self.collect_tokens_no_attrs(|this| this.parse_ty_no_question_mark_recover())?,
),
// this could be handled like a token, since it is one
@@ -159,7 +160,7 @@ impl<'a> Parser<'a> {
if let Some((ident, is_raw)) = get_macro_ident(&self.token) =>
{
self.bump();
- token::NtIdent(ident, is_raw)
+ NtIdent(ident, is_raw)
}
NonterminalKind::Ident => {
return Err(UnexpectedNonterminal::Ident {
@@ -167,16 +168,16 @@ impl<'a> Parser<'a> {
token: self.token.clone(),
}.into_diagnostic(&self.sess.span_diagnostic));
}
- NonterminalKind::Path => token::NtPath(
+ NonterminalKind::Path => NtPath(
P(self.collect_tokens_no_attrs(|this| this.parse_path(PathStyle::Type))?),
),
- NonterminalKind::Meta => token::NtMeta(P(self.parse_attr_item(true)?)),
- NonterminalKind::Vis => token::NtVis(
+ NonterminalKind::Meta => NtMeta(P(self.parse_attr_item(true)?)),
+ NonterminalKind::Vis => NtVis(
P(self.collect_tokens_no_attrs(|this| this.parse_visibility(FollowedByType::Yes))?),
),
NonterminalKind::Lifetime => {
if self.check_lifetime() {
- token::NtLifetime(self.expect_lifetime().ident)
+ NtLifetime(self.expect_lifetime().ident)
} else {
return Err(UnexpectedNonterminal::Lifetime {
span: self.token.span,
@@ -196,7 +197,7 @@ impl<'a> Parser<'a> {
);
}
- Ok(NtOrTt::Nt(nt))
+ Ok(ParseNtResult::Nt(nt))
}
}
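Loosely related sketch (not part of the patch): `nonterminal_may_begin_with` and the rewritten `may_be_ident` decide whether a macro matcher even attempts a given fragment; the explicit `NtLiteral` arm is annotated with the `true`/`false` keywords it covers. Two of the fragment kinds involved, shown with an ordinary declarative macro:

    macro_rules! lit_or_block {
        (lit $l:literal) => { $l };
        (blk $b:block) => { $b };
    }

    fn main() {
        let a = lit_or_block!(lit true);
        let b = lit_or_block!(blk { 2 + 2 });
        assert!(a);
        assert_eq!(b, 4);
    }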
diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs
index fdf365178..3e4e92789 100644
--- a/compiler/rustc_parse/src/parser/pat.rs
+++ b/compiler/rustc_parse/src/parser/pat.rs
@@ -2,13 +2,13 @@ use super::{ForceCollect, Parser, PathStyle, TrailingToken};
use crate::errors::{
self, AmbiguousRangePattern, DotDotDotForRemainingFields, DotDotDotRangeToPatternNotAllowed,
DotDotDotRestPattern, EnumPatternInsteadOfIdentifier, ExpectedBindingLeftOfAt,
- ExpectedCommaAfterPatternField, InclusiveRangeExtraEquals, InclusiveRangeMatchArrow,
- InclusiveRangeNoEnd, InvalidMutInPattern, PatternOnWrongSideOfAt, RefMutOrderIncorrect,
- RemoveLet, RepeatedMutInPattern, TopLevelOrPatternNotAllowed, TopLevelOrPatternNotAllowedSugg,
- TrailingVertNotAllowed, UnexpectedLifetimeInPattern, UnexpectedVertVertBeforeFunctionParam,
+ ExpectedCommaAfterPatternField, GenericArgsInPatRequireTurbofishSyntax,
+ InclusiveRangeExtraEquals, InclusiveRangeMatchArrow, InclusiveRangeNoEnd, InvalidMutInPattern,
+ PatternOnWrongSideOfAt, RefMutOrderIncorrect, RemoveLet, RepeatedMutInPattern,
+ TopLevelOrPatternNotAllowed, TopLevelOrPatternNotAllowedSugg, TrailingVertNotAllowed,
+ UnexpectedLifetimeInPattern, UnexpectedVertVertBeforeFunctionParam,
UnexpectedVertVertInPattern,
};
-use crate::fluent_generated as fluent;
use crate::{maybe_recover_from_interpolated_ty_qpath, maybe_whole};
use rustc_ast::mut_visit::{noop_visit_pat, MutVisitor};
use rustc_ast::ptr::P;
@@ -81,7 +81,8 @@ enum EatOrResult {
}
/// The syntax location of a given pattern. Used for diagnostics.
-pub(super) enum PatternLocation {
+#[derive(Clone, Copy)]
+pub enum PatternLocation {
LetBinding,
FunctionParameter,
}
@@ -92,8 +93,12 @@ impl<'a> Parser<'a> {
/// Corresponds to `pat<no_top_alt>` in RFC 2535 and does not admit or-patterns
/// at the top level. Used when parsing the parameters of lambda expressions,
/// functions, function pointers, and `pat` macro fragments.
- pub fn parse_pat_no_top_alt(&mut self, expected: Option<Expected>) -> PResult<'a, P<Pat>> {
- self.parse_pat_with_range_pat(true, expected)
+ pub fn parse_pat_no_top_alt(
+ &mut self,
+ expected: Option<Expected>,
+ syntax_loc: Option<PatternLocation>,
+ ) -> PResult<'a, P<Pat>> {
+ self.parse_pat_with_range_pat(true, expected, syntax_loc)
}
/// Parses a pattern.
@@ -111,7 +116,7 @@ impl<'a> Parser<'a> {
ra: RecoverColon,
rt: CommaRecoveryMode,
) -> PResult<'a, P<Pat>> {
- self.parse_pat_allow_top_alt_inner(expected, rc, ra, rt).map(|(pat, _)| pat)
+ self.parse_pat_allow_top_alt_inner(expected, rc, ra, rt, None).map(|(pat, _)| pat)
}
/// Returns the pattern and a bool indicating whether we recovered from a trailing vert (true =
@@ -122,6 +127,7 @@ impl<'a> Parser<'a> {
rc: RecoverComma,
ra: RecoverColon,
rt: CommaRecoveryMode,
+ syntax_loc: Option<PatternLocation>,
) -> PResult<'a, (P<Pat>, bool)> {
// Keep track of whether we recovered from a trailing vert so that we can avoid duplicated
// suggestions (which bothers rustfix).
@@ -134,9 +140,13 @@ impl<'a> Parser<'a> {
};
// Parse the first pattern (`p_0`).
- let mut first_pat = self.parse_pat_no_top_alt(expected)?;
+ let mut first_pat = self.parse_pat_no_top_alt(expected, syntax_loc)?;
if rc == RecoverComma::Yes {
- self.maybe_recover_unexpected_comma(first_pat.span, rt)?;
+ self.maybe_recover_unexpected_comma(
+ first_pat.span,
+ matches!(first_pat.kind, PatKind::MacCall(_)),
+ rt,
+ )?;
}
// If the next token is not a `|`,
@@ -173,12 +183,12 @@ impl<'a> Parser<'a> {
break;
}
}
- let pat = self.parse_pat_no_top_alt(expected).map_err(|mut err| {
+ let pat = self.parse_pat_no_top_alt(expected, syntax_loc).map_err(|mut err| {
err.span_label(lo, WHILE_PARSING_OR_MSG);
err
})?;
if rc == RecoverComma::Yes {
- self.maybe_recover_unexpected_comma(pat.span, rt)?;
+ self.maybe_recover_unexpected_comma(pat.span, false, rt)?;
}
pats.push(pat);
}
@@ -209,46 +219,31 @@ impl<'a> Parser<'a> {
rc,
RecoverColon::No,
CommaRecoveryMode::LikelyTuple,
+ Some(syntax_loc),
)?;
let colon = self.eat(&token::Colon);
if let PatKind::Or(pats) = &pat.kind {
let span = pat.span;
-
- if trailing_vert {
- // We already emitted an error and suggestion to remove the trailing vert. Don't
- // emit again.
-
- // FIXME(#100717): pass `TopLevelOrPatternNotAllowed::* { sub: None }` to
- // `delay_span_bug()` instead of fluent message
- self.sess.span_diagnostic.delay_span_bug(
- span,
- match syntax_loc {
- PatternLocation::LetBinding => {
- fluent::parse_or_pattern_not_allowed_in_let_binding
- }
- PatternLocation::FunctionParameter => {
- fluent::parse_or_pattern_not_allowed_in_fn_parameters
- }
- },
- );
+ let pat = pprust::pat_to_string(&pat);
+ let sub = if pats.len() == 1 {
+ Some(TopLevelOrPatternNotAllowedSugg::RemoveLeadingVert { span, pat })
} else {
- let pat = pprust::pat_to_string(&pat);
- let sub = if pats.len() == 1 {
- Some(TopLevelOrPatternNotAllowedSugg::RemoveLeadingVert { span, pat })
- } else {
- Some(TopLevelOrPatternNotAllowedSugg::WrapInParens { span, pat })
- };
+ Some(TopLevelOrPatternNotAllowedSugg::WrapInParens { span, pat })
+ };
- self.sess.emit_err(match syntax_loc {
- PatternLocation::LetBinding => {
- TopLevelOrPatternNotAllowed::LetBinding { span, sub }
- }
- PatternLocation::FunctionParameter => {
- TopLevelOrPatternNotAllowed::FunctionParameter { span, sub }
- }
- });
+ let mut err = self.sess.create_err(match syntax_loc {
+ PatternLocation::LetBinding => {
+ TopLevelOrPatternNotAllowed::LetBinding { span, sub }
+ }
+ PatternLocation::FunctionParameter => {
+ TopLevelOrPatternNotAllowed::FunctionParameter { span, sub }
+ }
+ });
+ if trailing_vert {
+ err.delay_as_bug();
}
+ err.emit();
}
Ok((pat, colon))
@@ -336,6 +331,7 @@ impl<'a> Parser<'a> {
&mut self,
allow_range_pat: bool,
expected: Option<Expected>,
+ syntax_loc: Option<PatternLocation>,
) -> PResult<'a, P<Pat>> {
maybe_recover_from_interpolated_ty_qpath!(self, true);
maybe_whole!(self, NtPat, |x| x);
@@ -375,11 +371,11 @@ impl<'a> Parser<'a> {
// Parse _
PatKind::Wild
} else if self.eat_keyword(kw::Mut) {
- self.parse_pat_ident_mut()?
+ self.parse_pat_ident_mut(syntax_loc)?
} else if self.eat_keyword(kw::Ref) {
// Parse ref ident @ pat / ref mut ident @ pat
let mutbl = self.parse_mutability();
- self.parse_pat_ident(BindingAnnotation(ByRef::Yes, mutbl))?
+ self.parse_pat_ident(BindingAnnotation(ByRef::Yes, mutbl), syntax_loc)?
} else if self.eat_keyword(kw::Box) {
self.parse_pat_box()?
} else if self.check_inline_const(0) {
@@ -401,7 +397,7 @@ impl<'a> Parser<'a> {
// Parse `ident @ pat`
// This can give false positives and parse nullary enums,
// they are dealt with later in resolve.
- self.parse_pat_ident(BindingAnnotation::NONE)?
+ self.parse_pat_ident(BindingAnnotation::NONE, syntax_loc)?
} else if self.is_start_of_pat_with_path() {
// Parse pattern starting with a path
let (qself, path) = if self.eat_lt() {
@@ -445,7 +441,7 @@ impl<'a> Parser<'a> {
);
let mut err = self_.struct_span_err(self_.token.span, msg);
- err.span_label(self_.token.span, format!("expected {}", expected));
+ err.span_label(self_.token.span, format!("expected {expected}"));
err
});
PatKind::Lit(self.mk_expr(lo, ExprKind::Lit(lit)))
@@ -502,7 +498,7 @@ impl<'a> Parser<'a> {
// At this point we attempt to parse `@ $pat_rhs` and emit an error.
self.bump(); // `@`
- let mut rhs = self.parse_pat_no_top_alt(None)?;
+ let mut rhs = self.parse_pat_no_top_alt(None, None)?;
let whole_span = lhs.span.to(rhs.span);
if let PatKind::Ident(_, _, sub @ None) = &mut rhs.kind {
@@ -558,7 +554,7 @@ impl<'a> Parser<'a> {
}
let mutbl = self.parse_mutability();
- let subpat = self.parse_pat_with_range_pat(false, expected)?;
+ let subpat = self.parse_pat_with_range_pat(false, expected, None)?;
Ok(PatKind::Ref(subpat, mutbl))
}
@@ -583,12 +579,12 @@ impl<'a> Parser<'a> {
}
/// Parse a mutable binding with the `mut` token already eaten.
- fn parse_pat_ident_mut(&mut self) -> PResult<'a, PatKind> {
+ fn parse_pat_ident_mut(&mut self, syntax_loc: Option<PatternLocation>) -> PResult<'a, PatKind> {
let mut_span = self.prev_token.span;
if self.eat_keyword(kw::Ref) {
self.sess.emit_err(RefMutOrderIncorrect { span: mut_span.to(self.prev_token.span) });
- return self.parse_pat_ident(BindingAnnotation::REF_MUT);
+ return self.parse_pat_ident(BindingAnnotation::REF_MUT, syntax_loc);
}
self.recover_additional_muts();
@@ -601,7 +597,7 @@ impl<'a> Parser<'a> {
}
// Parse the pattern we hope to be an identifier.
- let mut pat = self.parse_pat_no_top_alt(Some(Expected::Identifier))?;
+ let mut pat = self.parse_pat_no_top_alt(Some(Expected::Identifier), None)?;
// If we don't have `mut $ident (@ pat)?`, error.
if let PatKind::Ident(BindingAnnotation(ByRef::No, m @ Mutability::Not), ..) = &mut pat.kind
@@ -681,7 +677,7 @@ impl<'a> Parser<'a> {
let msg = format!("expected {}, found {}", expected, super::token_descr(&self.token));
let mut err = self.struct_span_err(self.token.span, msg);
- err.span_label(self.token.span, format!("expected {}", expected));
+ err.span_label(self.token.span, format!("expected {expected}"));
let sp = self.sess.source_map().start_point(self.token.span);
if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&sp) {
@@ -827,10 +823,26 @@ impl<'a> Parser<'a> {
/// Parses `ident` or `ident @ pat`.
/// Used by the copy foo and ref foo patterns to give a good
/// error message when parsing mistakes like `ref foo(a, b)`.
- fn parse_pat_ident(&mut self, binding_annotation: BindingAnnotation) -> PResult<'a, PatKind> {
+ fn parse_pat_ident(
+ &mut self,
+ binding_annotation: BindingAnnotation,
+ syntax_loc: Option<PatternLocation>,
+ ) -> PResult<'a, PatKind> {
let ident = self.parse_ident()?;
+
+ if self.may_recover()
+ && !matches!(syntax_loc, Some(PatternLocation::FunctionParameter))
+ && self.check_noexpect(&token::Lt)
+ && self.look_ahead(1, |t| t.can_begin_type())
+ {
+ return Err(self.sess.create_err(GenericArgsInPatRequireTurbofishSyntax {
+ span: self.token.span,
+ suggest_turbofish: self.token.span.shrink_to_lo(),
+ }));
+ }
+
let sub = if self.eat(&token::At) {
- Some(self.parse_pat_no_top_alt(Some(Expected::BindingPattern))?)
+ Some(self.parse_pat_no_top_alt(Some(Expected::BindingPattern), None)?)
} else {
None
};
@@ -919,14 +931,14 @@ impl<'a> Parser<'a> {
// We cannot use `parse_pat_ident()` since it will complain `box`
// is not an identifier.
let sub = if self.eat(&token::At) {
- Some(self.parse_pat_no_top_alt(Some(Expected::BindingPattern))?)
+ Some(self.parse_pat_no_top_alt(Some(Expected::BindingPattern), None)?)
} else {
None
};
Ok(PatKind::Ident(BindingAnnotation::NONE, Ident::new(kw::Box, box_span), sub))
} else {
- let pat = self.parse_pat_with_range_pat(false, None)?;
+ let pat = self.parse_pat_with_range_pat(false, None, None)?;
self.sess.gated_spans.gate(sym::box_patterns, box_span.to(self.prev_token.span));
Ok(PatKind::Box(pat))
}
@@ -994,7 +1006,7 @@ impl<'a> Parser<'a> {
break;
}
let token_str = super::token_descr(&self.token);
- let msg = format!("expected `}}`, found {}", token_str);
+ let msg = format!("expected `}}`, found {token_str}");
let mut err = self.struct_span_err(self.token.span, msg);
err.span_label(self.token.span, "expected `}`");
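For context on the `syntax_loc` plumbing and the `GenericArgsInPatRequireTurbofishSyntax` recovery added above: outside of function-parameter position, an identifier pattern followed by `<` now gets a turbofish suggestion instead of a plain parse error. A hedged, hypothetical sketch of the kind of input that reaches it (intentionally erroneous, not taken from the patch's test suite):

    fn main() {
        let v = Some(4u32);
        match v {
            // error: generic args in patterns require the turbofish syntax;
            // the suggested fix is `Some::<u32>(x)`.
            Some<u32>(x) => println!("{x}"),
            None => {}
        }
    }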
diff --git a/compiler/rustc_parse/src/parser/path.rs b/compiler/rustc_parse/src/parser/path.rs
index feb7e829c..445516c03 100644
--- a/compiler/rustc_parse/src/parser/path.rs
+++ b/compiler/rustc_parse/src/parser/path.rs
@@ -679,7 +679,7 @@ impl<'a> Parser<'a> {
);
err.span_suggestion(
eq.to(before_next),
- format!("remove the `=` if `{}` is a type", ident),
+ format!("remove the `=` if `{ident}` is a type"),
"",
Applicability::MaybeIncorrect,
)
diff --git a/compiler/rustc_parse/src/parser/stmt.rs b/compiler/rustc_parse/src/parser/stmt.rs
index 9fcf51a04..12c267351 100644
--- a/compiler/rustc_parse/src/parser/stmt.rs
+++ b/compiler/rustc_parse/src/parser/stmt.rs
@@ -193,10 +193,9 @@ impl<'a> Parser<'a> {
/// At this point, the `!` token after the path has already been eaten.
fn parse_stmt_mac(&mut self, lo: Span, attrs: AttrVec, path: ast::Path) -> PResult<'a, Stmt> {
let args = self.parse_delim_args()?;
- let delim = args.delim.to_token();
let hi = self.prev_token.span;
- let style = match delim {
+ let style = match args.delim {
Delimiter::Brace => MacStmtStyle::Braces,
_ => MacStmtStyle::NoBraces,
};
@@ -300,7 +299,7 @@ impl<'a> Parser<'a> {
Ok(ty) => (None, Some(ty)),
Err(mut err) => {
if let Ok(snip) = self.span_to_snippet(pat.span) {
- err.span_label(pat.span, format!("while parsing the type for `{}`", snip));
+ err.span_label(pat.span, format!("while parsing the type for `{snip}`"));
}
// we use noexpect here because we don't actually expect Eq to be here
// but we are still checking for it in order to be able to handle it if
@@ -502,7 +501,7 @@ impl<'a> Parser<'a> {
fn error_block_no_opening_brace<T>(&mut self) -> PResult<'a, T> {
let tok = super::token_descr(&self.token);
- let msg = format!("expected `{{`, found {}", tok);
+ let msg = format!("expected `{{`, found {tok}");
Err(self.error_block_no_opening_brace_msg(Cow::from(msg)))
}
@@ -638,10 +637,9 @@ impl<'a> Parser<'a> {
e.span_suggestion(
sp.with_hi(sp.lo() + BytePos(marker.len() as u32)),
format!(
- "add a space before `{}` to use a regular comment",
- doc_comment_marker,
+ "add a space before `{doc_comment_marker}` to use a regular comment",
),
- format!("{} {}", comment_marker, doc_comment_marker),
+ format!("{comment_marker} {doc_comment_marker}"),
Applicability::MaybeIncorrect,
);
}
diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs
index a29b696ae..2d888efb1 100644
--- a/compiler/rustc_parse/src/parser/ty.rs
+++ b/compiler/rustc_parse/src/parser/ty.rs
@@ -180,7 +180,7 @@ impl<'a> Parser<'a> {
)
}
- pub(super) fn parse_no_question_mark_recover(&mut self) -> PResult<'a, P<Ty>> {
+ pub(super) fn parse_ty_no_question_mark_recover(&mut self) -> PResult<'a, P<Ty>> {
self.parse_ty_common(
AllowPlus::Yes,
AllowCVariadic::No,
@@ -608,7 +608,7 @@ impl<'a> Parser<'a> {
/// Is a `dyn B0 + ... + Bn` type allowed here?
fn is_explicit_dyn_type(&mut self) -> bool {
self.check_keyword(kw::Dyn)
- && (self.token.uninterpolated_span().rust_2018()
+ && (self.token.uninterpolated_span().at_least_rust_2018()
|| self.look_ahead(1, |t| {
(t.can_begin_bound() || t.kind == TokenKind::BinOp(token::Star))
&& !can_continue_type_after_non_fn_ident(t)
@@ -714,6 +714,7 @@ impl<'a> Parser<'a> {
/// ```
fn parse_generic_bound(&mut self) -> PResult<'a, GenericBound> {
let lo = self.token.span;
+ let leading_token = self.prev_token.clone();
let has_parens = self.eat(&token::OpenDelim(Delimiter::Parenthesis));
let inner_lo = self.token.span;
@@ -722,7 +723,7 @@ impl<'a> Parser<'a> {
self.error_lt_bound_with_modifiers(modifiers);
self.parse_generic_lt_bound(lo, inner_lo, has_parens)?
} else {
- self.parse_generic_ty_bound(lo, has_parens, modifiers)?
+ self.parse_generic_ty_bound(lo, has_parens, modifiers, &leading_token)?
};
Ok(bound)
@@ -827,6 +828,7 @@ impl<'a> Parser<'a> {
lo: Span,
has_parens: bool,
modifiers: BoundModifiers,
+ leading_token: &Token,
) -> PResult<'a, GenericBound> {
let mut lifetime_defs = self.parse_late_bound_lifetime_defs()?;
let mut path = if self.token.is_keyword(kw::Fn)
@@ -873,18 +875,18 @@ impl<'a> Parser<'a> {
}
if has_parens {
- if self.token.is_like_plus() {
- // Someone has written something like `&dyn (Trait + Other)`. The correct code
- // would be `&(dyn Trait + Other)`, but we don't have access to the appropriate
- // span to suggest that. When written as `&dyn Trait + Other`, an appropriate
- // suggestion is given.
+ // Someone has written something like `&dyn (Trait + Other)`. The correct code
+ // would be `&(dyn Trait + Other)`
+ if self.token.is_like_plus() && leading_token.is_keyword(kw::Dyn) {
let bounds = vec![];
self.parse_remaining_bounds(bounds, true)?;
self.expect(&token::CloseDelim(Delimiter::Parenthesis))?;
- let sp = vec![lo, self.prev_token.span];
- self.sess.emit_err(errors::IncorrectBracesTraitBounds {
- span: sp,
- sugg: errors::IncorrectBracesTraitBoundsSugg { l: lo, r: self.prev_token.span },
+ self.sess.emit_err(errors::IncorrectParensTraitBounds {
+ span: vec![lo, self.prev_token.span],
+ sugg: errors::IncorrectParensTraitBoundsSugg {
+ wrong_span: leading_token.span.shrink_to_hi().to(lo),
+ new_span: leading_token.span.shrink_to_lo(),
+ },
});
} else {
self.expect(&token::CloseDelim(Delimiter::Parenthesis))?;
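The `leading_token` threading above restricts the parenthesized-bound recovery to the `dyn` case named in the comment, and the new suggestion moves the opening parenthesis in front of `dyn`. A rough sketch of the rejected shape and the suggested rewrite (illustrative only):

    use std::fmt::Debug;

    // Intentionally erroneous shape: `&dyn (Debug + Send)` puts the parentheses
    // around the bounds instead of the whole trait object.
    // The suggested fix groups the `dyn` type itself:
    fn f(x: &(dyn Debug + Send)) {
        println!("{x:?}");
    }

    fn main() {
        f(&1u8);
    }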
diff --git a/compiler/rustc_parse/src/validate_attr.rs b/compiler/rustc_parse/src/validate_attr.rs
index 928fdce31..f73965982 100644
--- a/compiler/rustc_parse/src/validate_attr.rs
+++ b/compiler/rustc_parse/src/validate_attr.rs
@@ -2,9 +2,10 @@
use crate::{errors, parse_in};
+use rustc_ast::token::Delimiter;
use rustc_ast::tokenstream::DelimSpan;
use rustc_ast::MetaItemKind;
-use rustc_ast::{self as ast, AttrArgs, AttrArgsEq, Attribute, DelimArgs, MacDelimiter, MetaItem};
+use rustc_ast::{self as ast, AttrArgs, AttrArgsEq, Attribute, DelimArgs, MetaItem};
use rustc_ast_pretty::pprust;
use rustc_errors::{Applicability, FatalError, PResult};
use rustc_feature::{AttributeTemplate, BuiltinAttribute, BUILTIN_ATTRIBUTE_MAP};
@@ -84,8 +85,8 @@ pub fn parse_meta<'a>(sess: &'a ParseSess, attr: &Attribute) -> PResult<'a, Meta
})
}
-pub fn check_meta_bad_delim(sess: &ParseSess, span: DelimSpan, delim: MacDelimiter) {
- if let ast::MacDelimiter::Parenthesis = delim {
+pub fn check_meta_bad_delim(sess: &ParseSess, span: DelimSpan, delim: Delimiter) {
+ if let Delimiter::Parenthesis = delim {
return;
}
sess.emit_err(errors::MetaBadDelim {
@@ -94,8 +95,8 @@ pub fn check_meta_bad_delim(sess: &ParseSess, span: DelimSpan, delim: MacDelimit
});
}
-pub fn check_cfg_attr_bad_delim(sess: &ParseSess, span: DelimSpan, delim: MacDelimiter) {
- if let ast::MacDelimiter::Parenthesis = delim {
+pub fn check_cfg_attr_bad_delim(sess: &ParseSess, span: DelimSpan, delim: Delimiter) {
+ if let Delimiter::Parenthesis = delim {
return;
}
sess.emit_err(errors::CfgAttrBadDelim {
@@ -157,15 +158,15 @@ fn emit_malformed_attribute(
matches!(name, sym::doc | sym::ignore | sym::inline | sym::link | sym::test | sym::bench)
};
- let error_msg = format!("malformed `{}` attribute input", name);
+ let error_msg = format!("malformed `{name}` attribute input");
let mut msg = "attribute must be of the form ".to_owned();
let mut suggestions = vec![];
let mut first = true;
let inner = if style == ast::AttrStyle::Inner { "!" } else { "" };
if template.word {
first = false;
- let code = format!("#{}[{}]", inner, name);
- msg.push_str(&format!("`{}`", &code));
+ let code = format!("#{inner}[{name}]");
+ msg.push_str(&format!("`{code}`"));
suggestions.push(code);
}
if let Some(descr) = template.list {
@@ -173,16 +174,16 @@ fn emit_malformed_attribute(
msg.push_str(" or ");
}
first = false;
- let code = format!("#{}[{}({})]", inner, name, descr);
- msg.push_str(&format!("`{}`", &code));
+ let code = format!("#{inner}[{name}({descr})]");
+ msg.push_str(&format!("`{code}`"));
suggestions.push(code);
}
if let Some(descr) = template.name_value_str {
if !first {
msg.push_str(" or ");
}
- let code = format!("#{}[{} = \"{}\"]", inner, name, descr);
- msg.push_str(&format!("`{}`", &code));
+ let code = format!("#{inner}[{name} = \"{descr}\"]");
+ msg.push_str(&format!("`{code}`"));
suggestions.push(code);
}
if should_warn(name) {
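The template message assembled above enumerates up to three accepted shapes for a built-in attribute (word, list, and name-value). As a concrete illustration, `deprecated` supports all three; the exact keys shown here are only an example of the list form:

    // word form:        #[deprecated]
    // list form:        #[deprecated(since = "1.0.0", note = "use `new_api` instead")]
    // name-value form:  #[deprecated = "use `new_api` instead"]
    #[deprecated = "use `new_api` instead"]
    fn old_api() {}

    fn new_api() {}

    fn main() {
        new_api();
        old_api(); // warns: use of deprecated function
    }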
diff --git a/compiler/rustc_parse_format/src/lib.rs b/compiler/rustc_parse_format/src/lib.rs
index 7de84db21..88452ccdf 100644
--- a/compiler/rustc_parse_format/src/lib.rs
+++ b/compiler/rustc_parse_format/src/lib.rs
@@ -109,6 +109,8 @@ pub struct Argument<'a> {
pub struct FormatSpec<'a> {
/// Optionally specified character to fill alignment with.
pub fill: Option<char>,
+ /// Span of the optionally specified fill character.
+ pub fill_span: Option<InnerSpan>,
/// Optionally specified alignment.
pub align: Alignment,
/// The `+` or `-` flag.
@@ -264,7 +266,7 @@ impl<'a> Iterator for Parser<'a> {
Some(String(self.string(pos + 1)))
} else {
let arg = self.argument(lbrace_end);
- if let Some(rbrace_pos) = self.must_consume('}') {
+ if let Some(rbrace_pos) = self.consume_closing_brace(&arg) {
if self.is_source_literal {
let lbrace_byte_pos = self.to_span_index(pos);
let rbrace_byte_pos = self.to_span_index(rbrace_pos);
@@ -450,69 +452,51 @@ impl<'a> Parser<'a> {
/// Forces consumption of the specified character. If the character is not
/// found, an error is emitted.
- fn must_consume(&mut self, c: char) -> Option<usize> {
+ fn consume_closing_brace(&mut self, arg: &Argument<'_>) -> Option<usize> {
self.ws();
- if let Some(&(pos, maybe)) = self.cur.peek() {
- if c == maybe {
+ let pos;
+ let description;
+
+ if let Some(&(peek_pos, maybe)) = self.cur.peek() {
+ if maybe == '}' {
self.cur.next();
- Some(pos)
- } else {
- let pos = self.to_span_index(pos);
- let description = format!("expected `'}}'`, found `{maybe:?}`");
- let label = "expected `}`".to_owned();
- let (note, secondary_label) = if c == '}' {
- (
- Some(
- "if you intended to print `{`, you can escape it using `{{`".to_owned(),
- ),
- self.last_opening_brace
- .map(|sp| ("because of this opening brace".to_owned(), sp)),
- )
- } else {
- (None, None)
- };
- self.errors.push(ParseError {
- description,
- note,
- label,
- span: pos.to(pos),
- secondary_label,
- should_be_replaced_with_positional_argument: false,
- });
- None
+ return Some(peek_pos);
}
+
+ pos = peek_pos;
+ description = format!("expected `'}}'`, found `{maybe:?}`");
} else {
- let description = format!("expected `{c:?}` but string was terminated");
+ description = "expected `'}'` but string was terminated".to_owned();
// point at closing `"`
- let pos = self.input.len() - if self.append_newline { 1 } else { 0 };
- let pos = self.to_span_index(pos);
- if c == '}' {
- let label = format!("expected `{c:?}`");
- let (note, secondary_label) = if c == '}' {
- (
- Some(
- "if you intended to print `{`, you can escape it using `{{`".to_owned(),
- ),
- self.last_opening_brace
- .map(|sp| ("because of this opening brace".to_owned(), sp)),
- )
- } else {
- (None, None)
- };
- self.errors.push(ParseError {
- description,
- note,
- label,
- span: pos.to(pos),
- secondary_label,
- should_be_replaced_with_positional_argument: false,
- });
- } else {
- self.err(description, format!("expected `{c:?}`"), pos.to(pos));
- }
- None
+ pos = self.input.len() - if self.append_newline { 1 } else { 0 };
}
+
+ let pos = self.to_span_index(pos);
+
+ let label = "expected `'}'`".to_owned();
+ let (note, secondary_label) = if arg.format.fill == Some('}') {
+ (
+ Some("the character `'}'` is interpreted as a fill character because of the `:` that precedes it".to_owned()),
+ arg.format.fill_span.map(|sp| ("this is not interpreted as a formatting closing brace".to_owned(), sp)),
+ )
+ } else {
+ (
+ Some("if you intended to print `{`, you can escape it using `{{`".to_owned()),
+ self.last_opening_brace.map(|sp| ("because of this opening brace".to_owned(), sp)),
+ )
+ };
+
+ self.errors.push(ParseError {
+ description,
+ note,
+ label,
+ span: pos.to(pos),
+ secondary_label,
+ should_be_replaced_with_positional_argument: false,
+ });
+
+ None
}
/// Consumes all whitespace characters until the first non-whitespace character
@@ -608,6 +592,7 @@ impl<'a> Parser<'a> {
fn format(&mut self) -> FormatSpec<'a> {
let mut spec = FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -625,9 +610,10 @@ impl<'a> Parser<'a> {
}
// fill character
- if let Some(&(_, c)) = self.cur.peek() {
+ if let Some(&(idx, c)) = self.cur.peek() {
if let Some((_, '>' | '<' | '^')) = self.cur.clone().nth(1) {
spec.fill = Some(c);
+ spec.fill_span = Some(self.span(idx, idx + 1));
self.cur.next();
}
}
@@ -722,6 +708,7 @@ impl<'a> Parser<'a> {
fn inline_asm(&mut self) -> FormatSpec<'a> {
let mut spec = FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
diff --git a/compiler/rustc_parse_format/src/tests.rs b/compiler/rustc_parse_format/src/tests.rs
index 45314e2fb..0c594f910 100644
--- a/compiler/rustc_parse_format/src/tests.rs
+++ b/compiler/rustc_parse_format/src/tests.rs
@@ -9,6 +9,7 @@ fn same(fmt: &'static str, p: &[Piece<'static>]) {
fn fmtdflt() -> FormatSpec<'static> {
return FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -128,6 +129,7 @@ fn format_type() {
position_span: InnerSpan { start: 2, end: 3 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -152,6 +154,7 @@ fn format_align_fill() {
position_span: InnerSpan { start: 2, end: 3 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignRight,
sign: None,
alternate: false,
@@ -173,6 +176,7 @@ fn format_align_fill() {
position_span: InnerSpan { start: 2, end: 3 },
format: FormatSpec {
fill: Some('0'),
+ fill_span: Some(InnerSpan::new(4, 5)),
align: AlignLeft,
sign: None,
alternate: false,
@@ -194,6 +198,7 @@ fn format_align_fill() {
position_span: InnerSpan { start: 2, end: 3 },
format: FormatSpec {
fill: Some('*'),
+ fill_span: Some(InnerSpan::new(4, 5)),
align: AlignLeft,
sign: None,
alternate: false,
@@ -218,6 +223,7 @@ fn format_counts() {
position_span: InnerSpan { start: 2, end: 2 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -239,6 +245,7 @@ fn format_counts() {
position_span: InnerSpan { start: 2, end: 2 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -260,6 +267,7 @@ fn format_counts() {
position_span: InnerSpan { start: 2, end: 3 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -281,6 +289,7 @@ fn format_counts() {
position_span: InnerSpan { start: 2, end: 2 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -302,6 +311,7 @@ fn format_counts() {
position_span: InnerSpan { start: 2, end: 2 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -323,6 +333,7 @@ fn format_counts() {
position_span: InnerSpan { start: 2, end: 2 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -344,6 +355,7 @@ fn format_counts() {
position_span: InnerSpan { start: 2, end: 2 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
@@ -368,6 +380,7 @@ fn format_flags() {
position_span: InnerSpan { start: 2, end: 2 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: Some(Sign::Minus),
alternate: false,
@@ -389,6 +402,7 @@ fn format_flags() {
position_span: InnerSpan { start: 2, end: 2 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: Some(Sign::Plus),
alternate: true,
@@ -415,6 +429,7 @@ fn format_mixture() {
position_span: InnerSpan { start: 7, end: 8 },
format: FormatSpec {
fill: None,
+ fill_span: None,
align: AlignUnknown,
sign: None,
alternate: false,
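The new `fill_span` field recorded above lets the parser point at a `}` that was consumed as a fill character rather than as a closing brace. A hedged sketch of a format string that reaches the new note (hypothetical and intentionally malformed, so it does not compile):

    fn main() {
        // The `}` right after `:` is parsed as the fill character for the `>`
        // alignment, so the argument is never closed; the parser now explains
        // the fill interpretation and points at the `}` via `fill_span`.
        println!("{:}>8", 1);
    }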
diff --git a/compiler/rustc_passes/messages.ftl b/compiler/rustc_passes/messages.ftl
index a607e483c..6eacbebe7 100644
--- a/compiler/rustc_passes/messages.ftl
+++ b/compiler/rustc_passes/messages.ftl
@@ -98,6 +98,9 @@ passes_collapse_debuginfo =
`collapse_debuginfo` attribute should be applied to macro definitions
.label = not a macro definition
+passes_confusables = attribute should be applied to an inherent method
+ .label = not an inherent method
+
passes_const_impl_const_trait =
const `impl`s must be for traits marked with `#[const_trait]`
.note = this trait must be annotated with `#[const_trait]`
@@ -208,6 +211,17 @@ passes_doc_keyword_not_mod =
passes_doc_keyword_only_impl =
`#[doc(keyword = "...")]` should be used on impl blocks
+passes_doc_masked_not_extern_crate_self =
+ this attribute cannot be applied to an `extern crate self` item
+ .label = not applicable on `extern crate self` items
+ .extern_crate_self_label = `extern crate self` defined here
+
+passes_doc_masked_only_extern_crate =
+ this attribute can only be applied to an `extern crate` item
+ .label = only applicable on `extern crate` items
+ .not_an_extern_crate_label = not an `extern crate` item
+ .note = read <https://doc.rust-lang.org/unstable-book/language-features/doc-masked.html> for more information
+
passes_doc_test_literal = `#![doc(test(...)]` does not take a literal
passes_doc_test_takes_list =
@@ -266,6 +280,9 @@ passes_duplicate_lang_item_crate_depends =
.first_definition_path = first definition in `{$orig_crate_name}` loaded from {$orig_path}
.second_definition_path = second definition in `{$crate_name}` loaded from {$path}
+passes_empty_confusables =
+ expected at least one confusable name
+
passes_export_name =
attribute should be applied to a free function, impl method or static
.label = not a free function, impl method or static
@@ -326,6 +343,9 @@ passes_implied_feature_not_exist =
passes_incorrect_do_not_recommend_location =
`#[do_not_recommend]` can only be placed on trait implementations
+passes_incorrect_meta_item = expected a quoted string literal
+passes_incorrect_meta_item_suggestion = consider surrounding this with quotes
+
passes_incorrect_target =
`{$name}` language item must be applied to a {$kind} with {$at_least ->
[true] at least {$num}
@@ -408,6 +428,10 @@ passes_link_section =
passes_macro_export =
`#[macro_export]` only has an effect on macro definitions
+passes_macro_export_on_decl_macro =
+ `#[macro_export]` has no effect on declarative macro definitions
+ .note = declarative macros follow the same exporting rules as regular items
+
passes_macro_use =
`#[{$name}]` only has an effect on `extern crate` and modules
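To make the two new `doc_masked` messages concrete, here is a hedged sketch of the item shapes they distinguish; `doc_masked` is an unstable feature, so this is nightly-only and the rejected cases are left commented so the sketch compiles:

    #![feature(doc_masked)]

    // Accepted: `#[doc(masked)]` on an ordinary `extern crate` item.
    #[doc(masked)]
    extern crate core;

    // Linted: the attribute is only meaningful on `extern crate` items...
    // #[doc(masked)]
    // pub struct S;

    // ...and it is rejected on `extern crate self` items.
    // #[doc(masked)]
    // extern crate self as this;

    fn main() {}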
diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs
index 073760f39..197b335bd 100644
--- a/compiler/rustc_passes/src/check_attr.rs
+++ b/compiler/rustc_passes/src/check_attr.rs
@@ -10,7 +10,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{Applicability, IntoDiagnosticArg, MultiSpan};
use rustc_feature::{AttributeDuplicates, AttributeType, BuiltinAttribute, BUILTIN_ATTRIBUTE_MAP};
use rustc_hir as hir;
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::LocalModDefId;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{
self, FnSig, ForeignItem, HirId, Item, ItemKind, TraitItem, CRATE_HIR_ID, CRATE_OWNER_ID,
@@ -183,6 +183,7 @@ impl CheckAttrVisitor<'_> {
| sym::rustc_allowed_through_unstable_modules
| sym::rustc_promotable => self.check_stability_promotable(&attr, span, target),
sym::link_ordinal => self.check_link_ordinal(&attr, span, target),
+ sym::rustc_confusables => self.check_confusables(&attr, target),
_ => true,
};
@@ -694,7 +695,6 @@ impl CheckAttrVisitor<'_> {
| Target::GlobalAsm
| Target::TyAlias
| Target::OpaqueTy
- | Target::ImplTraitPlaceholder
| Target::Enum
| Target::Variant
| Target::Struct
@@ -878,6 +878,44 @@ impl CheckAttrVisitor<'_> {
}
}
+ fn check_doc_masked(
+ &self,
+ attr: &Attribute,
+ meta: &NestedMetaItem,
+ hir_id: HirId,
+ target: Target,
+ ) -> bool {
+ if target != Target::ExternCrate {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ errors::DocMaskedOnlyExternCrate {
+ attr_span: meta.span(),
+ item_span: (attr.style == AttrStyle::Outer)
+ .then(|| self.tcx.hir().span(hir_id)),
+ },
+ );
+ return false;
+ }
+
+ if self.tcx.extern_mod_stmt_cnum(hir_id.owner).is_none() {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ errors::DocMaskedNotExternCrateSelf {
+ attr_span: meta.span(),
+ item_span: (attr.style == AttrStyle::Outer)
+ .then(|| self.tcx.hir().span(hir_id)),
+ },
+ );
+ return false;
+ }
+
+ true
+ }
+
/// Checks that an attribute is *not* used at the crate level. Returns `true` if valid.
fn check_attr_not_crate_level(
&self,
@@ -1048,6 +1086,17 @@ impl CheckAttrVisitor<'_> {
is_valid = false;
}
+ sym::masked
+ if !self.check_doc_masked(
+ attr,
+ meta,
+ hir_id,
+ target,
+ ) =>
+ {
+ is_valid = false;
+ }
+
// no_default_passes: deprecated
// passes: deprecated
// plugins: removed, but rustdoc warns about it itself
@@ -1433,9 +1482,9 @@ impl CheckAttrVisitor<'_> {
};
let Some(ItemLike::Item(Item {
- kind: ItemKind::Fn(FnSig { decl, .. }, generics, _),
- ..
- })) = item else {
+ kind: ItemKind::Fn(FnSig { decl, .. }, generics, _), ..
+ })) = item
+ else {
bug!("should be a function item");
};
@@ -1986,6 +2035,46 @@ impl CheckAttrVisitor<'_> {
}
}
+ fn check_confusables(&self, attr: &Attribute, target: Target) -> bool {
+ match target {
+ Target::Method(MethodKind::Inherent) => {
+ let Some(meta) = attr.meta() else {
+ return false;
+ };
+ let ast::MetaItem { kind: MetaItemKind::List(ref metas), .. } = meta else {
+ return false;
+ };
+
+ let mut candidates = Vec::new();
+
+ for meta in metas {
+ let NestedMetaItem::Lit(meta_lit) = meta else {
+ self.tcx.sess.emit_err(errors::IncorrectMetaItem {
+ span: meta.span(),
+ suggestion: errors::IncorrectMetaItemSuggestion {
+ lo: meta.span().shrink_to_lo(),
+ hi: meta.span().shrink_to_hi(),
+ },
+ });
+ return false;
+ };
+ candidates.push(meta_lit.symbol);
+ }
+
+ if candidates.is_empty() {
+ self.tcx.sess.emit_err(errors::EmptyConfusables { span: attr.span });
+ return false;
+ }
+
+ true
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::Confusables { attr_span: attr.span });
+ false
+ }
+ }
+ }
+
fn check_deprecated(&self, hir_id: HirId, attr: &Attribute, _span: Span, target: Target) {
match target {
Target::Closure | Target::Expression | Target::Statement | Target::Arm => {
@@ -2044,6 +2133,20 @@ impl CheckAttrVisitor<'_> {
);
}
}
+ } else {
+ // special case when `#[macro_export]` is applied to a macro 2.0
+ let (macro_definition, _) =
+ self.tcx.hir().find(hir_id).unwrap().expect_item().expect_macro();
+ let is_decl_macro = !macro_definition.macro_rules;
+
+ if is_decl_macro {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::MacroExport::OnDeclMacro,
+ );
+ }
}
}
@@ -2107,8 +2210,12 @@ impl CheckAttrVisitor<'_> {
}
let tcx = self.tcx;
- let Some(token_stream_def_id) = tcx.get_diagnostic_item(sym::TokenStream) else { return; };
- let Some(token_stream) = tcx.type_of(token_stream_def_id).no_bound_vars() else { return; };
+ let Some(token_stream_def_id) = tcx.get_diagnostic_item(sym::TokenStream) else {
+ return;
+ };
+ let Some(token_stream) = tcx.type_of(token_stream_def_id).no_bound_vars() else {
+ return;
+ };
let def_id = hir_id.expect_owner().def_id;
let param_env = ty::ParamEnv::empty();
@@ -2117,10 +2224,10 @@ impl CheckAttrVisitor<'_> {
let ocx = ObligationCtxt::new(&infcx);
let span = tcx.def_span(def_id);
- let fresh_substs = infcx.fresh_substs_for_item(span, def_id.to_def_id());
+ let fresh_args = infcx.fresh_args_for_item(span, def_id.to_def_id());
let sig = tcx.liberate_late_bound_regions(
def_id.to_def_id(),
- tcx.fn_sig(def_id).subst(tcx, fresh_substs),
+ tcx.fn_sig(def_id).instantiate(tcx, fresh_args),
);
let mut cause = ObligationCause::misc(span, def_id);
@@ -2358,10 +2465,10 @@ fn check_non_exported_macro_for_invalid_attrs(tcx: TyCtxt<'_>, item: &Item<'_>)
}
}
-fn check_mod_attrs(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+fn check_mod_attrs(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
let check_attr_visitor = &mut CheckAttrVisitor { tcx, abort: Cell::new(false) };
tcx.hir().visit_item_likes_in_module(module_def_id, check_attr_visitor);
- if module_def_id.is_top_level_module() {
+ if module_def_id.to_local_def_id().is_top_level_module() {
check_attr_visitor.check_attributes(CRATE_HIR_ID, DUMMY_SP, Target::Mod, None);
check_invalid_crate_level_attr(tcx, tcx.hir().krate_attrs());
}
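A hedged sketch of what the new `check_confusables` validation accepts and rejects. `rustc_confusables` is a rustc-internal attribute (gated behind `rustc_attrs`), so this is shown only to illustrate the checks added above, with the rejected forms kept as comments:

    #![feature(rustc_attrs)]

    pub struct Set;

    impl Set {
        // Accepted: an inherent method listing at least one quoted name.
        #[rustc_confusables("add", "put")]
        pub fn insert(&mut self) {}

        // Rejected by the checks above:
        // #[rustc_confusables(add)]   // "expected a quoted string literal"
        // #[rustc_confusables()]      // "expected at least one confusable name"
        pub fn remove(&mut self) {}
    }

    fn main() {
        let mut s = Set;
        s.insert();
        s.remove();
    }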
diff --git a/compiler/rustc_passes/src/check_const.rs b/compiler/rustc_passes/src/check_const.rs
index fc437c429..8437e9a40 100644
--- a/compiler/rustc_passes/src/check_const.rs
+++ b/compiler/rustc_passes/src/check_const.rs
@@ -9,7 +9,7 @@
use rustc_attr as attr;
use rustc_hir as hir;
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::{LocalDefId, LocalModDefId};
use rustc_hir::intravisit::{self, Visitor};
use rustc_middle::hir::nested_filter;
use rustc_middle::query::Providers;
@@ -45,7 +45,7 @@ impl NonConstExpr {
Self::Loop(ForLoop) | Self::Match(ForLoopDesugar) => &[sym::const_for],
- Self::Match(TryDesugar) => &[sym::const_try],
+ Self::Match(TryDesugar(_)) => &[sym::const_try],
// All other expressions are allowed.
Self::Loop(Loop | While) | Self::Match(Normal | FormatArgs) => &[],
@@ -55,7 +55,7 @@ impl NonConstExpr {
}
}
-fn check_mod_const_bodies(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+fn check_mod_const_bodies(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
let mut vis = CheckConstVisitor::new(tcx);
tcx.hir().visit_item_likes_in_module(module_def_id, &mut vis);
}
@@ -157,10 +157,8 @@ impl<'tcx> CheckConstVisitor<'tcx> {
// is a pretty narrow case, however.
if tcx.sess.is_nightly_build() {
for gate in missing_secondary {
- let note = format!(
- "add `#![feature({})]` to the crate attributes to enable",
- gate,
- );
+ let note =
+ format!("add `#![feature({gate})]` to the crate attributes to enable",);
err.help(note);
}
}
diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs
index d5ac1cd9c..d1c3bcf38 100644
--- a/compiler/rustc_passes/src/dead.rs
+++ b/compiler/rustc_passes/src/dead.rs
@@ -4,10 +4,11 @@
use hir::def_id::{LocalDefIdMap, LocalDefIdSet};
use itertools::Itertools;
+use rustc_data_structures::unord::UnordSet;
use rustc_errors::MultiSpan;
use rustc_hir as hir;
use rustc_hir::def::{CtorOf, DefKind, Res};
-use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId};
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{Node, PatKind, TyKind};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@@ -42,8 +43,16 @@ fn should_explore(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
)
}
+/// Determine if a work from the worklist is coming from a `#[allow]`
+/// or a `#[expect]` of `dead_code`
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
+enum ComesFromAllowExpect {
+ Yes,
+ No,
+}
+
struct MarkSymbolVisitor<'tcx> {
- worklist: Vec<LocalDefId>,
+ worklist: Vec<(LocalDefId, ComesFromAllowExpect)>,
tcx: TyCtxt<'tcx>,
maybe_typeck_results: Option<&'tcx ty::TypeckResults<'tcx>>,
live_symbols: LocalDefIdSet,
@@ -72,7 +81,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
fn check_def_id(&mut self, def_id: DefId) {
if let Some(def_id) = def_id.as_local() {
if should_explore(self.tcx, def_id) || self.struct_constructors.contains_key(&def_id) {
- self.worklist.push(def_id);
+ self.worklist.push((def_id, ComesFromAllowExpect::No));
}
self.live_symbols.insert(def_id);
}
@@ -87,7 +96,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
fn handle_res(&mut self, res: Res) {
match res {
- Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::TyAlias, def_id) => {
+ Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::TyAlias { .. }, def_id) => {
self.check_def_id(def_id);
}
_ if self.in_pat => {}
@@ -269,14 +278,16 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
}
fn mark_live_symbols(&mut self) {
- let mut scanned = LocalDefIdSet::default();
- while let Some(id) = self.worklist.pop() {
- if !scanned.insert(id) {
+ let mut scanned = UnordSet::default();
+ while let Some(work) = self.worklist.pop() {
+ if !scanned.insert(work) {
continue;
}
+ let (id, comes_from_allow_expect) = work;
+
// Avoid accessing the HIR for the synthesized associated type generated for RPITITs.
- if self.tcx.opt_rpitit_info(id.to_def_id()).is_some() {
+ if self.tcx.is_impl_trait_in_trait(id.to_def_id()) {
self.live_symbols.insert(id);
continue;
}
@@ -286,7 +297,30 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
let id = self.struct_constructors.get(&id).copied().unwrap_or(id);
if let Some(node) = self.tcx.hir().find_by_def_id(id) {
- self.live_symbols.insert(id);
+ // When using `#[allow]` or `#[expect]` of `dead_code`, we do a QOL improvement
+ // by declaring fn calls, statics, ... within said items as live, as well as
+ // the item itself, although technically this is not the case.
+ //
+ // This means that the lint for said items will never be fired.
+ //
+ // This doesn't make any difference for the item declared with `#[allow]`, as
+ // the lint firing will be a nop, as it will be silenced by the `#[allow]` of
+ // the item.
+ //
+ // However, for `#[expect]`, the presence or absence of the lint is relevant,
+ // so we don't add it to the list of live symbols when it comes from a
+ // `#[expect]`. This means that we will correctly report an item as live or not
+ // for the `#[expect]` case.
+ //
+ // Note that an item can and will be duplicated on the worklist with different
+ // `ComesFromAllowExpect`, particularly if it was added from the
+ // `effective_visibilities` query or from the `#[allow]`/`#[expect]` checks,
+ // this "duplication" is essential as otherwise a function with `#[expect]`
+ // called from a `pub fn` may be falsely reported as not live, falsely
+ // triggering the `unfulfilled_lint_expectations` lint.
+ if comes_from_allow_expect != ComesFromAllowExpect::Yes {
+ self.live_symbols.insert(id);
+ }
self.visit_node(node);
}
}
@@ -304,7 +338,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
if let Some(trait_of) = self.tcx.trait_id_of_impl(impl_of)
&& self.tcx.has_attr(trait_of, sym::rustc_trivial_field_reads)
{
- let trait_ref = self.tcx.impl_trait_ref(impl_of).unwrap().subst_identity();
+ let trait_ref = self.tcx.impl_trait_ref(impl_of).unwrap().instantiate_identity();
if let ty::Adt(adt_def, _) = trait_ref.self_ty().kind()
&& let Some(adt_def_id) = adt_def.did().as_local()
{
@@ -353,7 +387,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
//// This is done to handle the case where, for example, the static
//// method of a private type is used, but the type itself is never
//// called directly.
- let self_ty = self.tcx.type_of(item).subst_identity();
+ let self_ty = self.tcx.type_of(item).instantiate_identity();
match *self_ty.kind() {
ty::Adt(def, _) => self.check_def_id(def.did()),
ty::Foreign(did) => self.check_def_id(did),
@@ -513,16 +547,20 @@ impl<'tcx> Visitor<'tcx> for MarkSymbolVisitor<'tcx> {
}
}
-fn has_allow_dead_code_or_lang_attr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+fn has_allow_dead_code_or_lang_attr(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+) -> Option<ComesFromAllowExpect> {
fn has_lang_attr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
tcx.has_attr(def_id, sym::lang)
// Stable attribute for #[lang = "panic_impl"]
|| tcx.has_attr(def_id, sym::panic_handler)
}
- fn has_allow_dead_code(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+ fn has_allow_expect_dead_code(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- tcx.lint_level_at_node(lint::builtin::DEAD_CODE, hir_id).0 == lint::Allow
+ let lint_level = tcx.lint_level_at_node(lint::builtin::DEAD_CODE, hir_id).0;
+ matches!(lint_level, lint::Allow | lint::Expect(_))
}
fn has_used_like_attr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
@@ -537,9 +575,13 @@ fn has_allow_dead_code_or_lang_attr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool
}
}
- has_allow_dead_code(tcx, def_id)
- || has_used_like_attr(tcx, def_id)
- || has_lang_attr(tcx, def_id)
+ if has_allow_expect_dead_code(tcx, def_id) {
+ Some(ComesFromAllowExpect::Yes)
+ } else if has_used_like_attr(tcx, def_id) || has_lang_attr(tcx, def_id) {
+ Some(ComesFromAllowExpect::No)
+ } else {
+ None
+ }
}
// These check_* functions seeds items that
@@ -557,21 +599,23 @@ fn has_allow_dead_code_or_lang_attr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool
// * Implementations of traits and trait methods
fn check_item<'tcx>(
tcx: TyCtxt<'tcx>,
- worklist: &mut Vec<LocalDefId>,
+ worklist: &mut Vec<(LocalDefId, ComesFromAllowExpect)>,
struct_constructors: &mut LocalDefIdMap<LocalDefId>,
id: hir::ItemId,
) {
let allow_dead_code = has_allow_dead_code_or_lang_attr(tcx, id.owner_id.def_id);
- if allow_dead_code {
- worklist.push(id.owner_id.def_id);
+ if let Some(comes_from_allow) = allow_dead_code {
+ worklist.push((id.owner_id.def_id, comes_from_allow));
}
match tcx.def_kind(id.owner_id) {
DefKind::Enum => {
let item = tcx.hir().item(id);
if let hir::ItemKind::Enum(ref enum_def, _) = item.kind {
- if allow_dead_code {
- worklist.extend(enum_def.variants.iter().map(|variant| variant.def_id));
+ if let Some(comes_from_allow) = allow_dead_code {
+ worklist.extend(
+ enum_def.variants.iter().map(|variant| (variant.def_id, comes_from_allow)),
+ );
}
for variant in enum_def.variants {
@@ -583,7 +627,7 @@ fn check_item<'tcx>(
}
DefKind::Impl { of_trait } => {
if of_trait {
- worklist.push(id.owner_id.def_id);
+ worklist.push((id.owner_id.def_id, ComesFromAllowExpect::No));
}
// get DefIds from another query
@@ -594,8 +638,10 @@ fn check_item<'tcx>(
// And we access the Map here to get HirId from LocalDefId
for id in local_def_ids {
- if of_trait || has_allow_dead_code_or_lang_attr(tcx, id) {
- worklist.push(id);
+ if of_trait {
+ worklist.push((id, ComesFromAllowExpect::No));
+ } else if let Some(comes_from_allow) = has_allow_dead_code_or_lang_attr(tcx, id) {
+ worklist.push((id, comes_from_allow));
}
}
}
@@ -609,43 +655,59 @@ fn check_item<'tcx>(
}
DefKind::GlobalAsm => {
// global_asm! is always live.
- worklist.push(id.owner_id.def_id);
+ worklist.push((id.owner_id.def_id, ComesFromAllowExpect::No));
}
_ => {}
}
}
-fn check_trait_item(tcx: TyCtxt<'_>, worklist: &mut Vec<LocalDefId>, id: hir::TraitItemId) {
+fn check_trait_item(
+ tcx: TyCtxt<'_>,
+ worklist: &mut Vec<(LocalDefId, ComesFromAllowExpect)>,
+ id: hir::TraitItemId,
+) {
use hir::TraitItemKind::{Const, Fn};
if matches!(tcx.def_kind(id.owner_id), DefKind::AssocConst | DefKind::AssocFn) {
let trait_item = tcx.hir().trait_item(id);
if matches!(trait_item.kind, Const(_, Some(_)) | Fn(_, hir::TraitFn::Provided(_)))
- && has_allow_dead_code_or_lang_attr(tcx, trait_item.owner_id.def_id)
+ && let Some(comes_from_allow) = has_allow_dead_code_or_lang_attr(tcx, trait_item.owner_id.def_id)
{
- worklist.push(trait_item.owner_id.def_id);
+ worklist.push((trait_item.owner_id.def_id, comes_from_allow));
}
}
}
-fn check_foreign_item(tcx: TyCtxt<'_>, worklist: &mut Vec<LocalDefId>, id: hir::ForeignItemId) {
+fn check_foreign_item(
+ tcx: TyCtxt<'_>,
+ worklist: &mut Vec<(LocalDefId, ComesFromAllowExpect)>,
+ id: hir::ForeignItemId,
+) {
if matches!(tcx.def_kind(id.owner_id), DefKind::Static(_) | DefKind::Fn)
- && has_allow_dead_code_or_lang_attr(tcx, id.owner_id.def_id)
+ && let Some(comes_from_allow) = has_allow_dead_code_or_lang_attr(tcx, id.owner_id.def_id)
{
- worklist.push(id.owner_id.def_id);
+ worklist.push((id.owner_id.def_id, comes_from_allow));
}
}
-fn create_and_seed_worklist(tcx: TyCtxt<'_>) -> (Vec<LocalDefId>, LocalDefIdMap<LocalDefId>) {
+fn create_and_seed_worklist(
+ tcx: TyCtxt<'_>,
+) -> (Vec<(LocalDefId, ComesFromAllowExpect)>, LocalDefIdMap<LocalDefId>) {
let effective_visibilities = &tcx.effective_visibilities(());
// see `MarkSymbolVisitor::struct_constructors`
let mut struct_constructors = Default::default();
let mut worklist = effective_visibilities
.iter()
.filter_map(|(&id, effective_vis)| {
- effective_vis.is_public_at_level(Level::Reachable).then_some(id)
+ effective_vis
+ .is_public_at_level(Level::Reachable)
+ .then_some(id)
+ .map(|id| (id, ComesFromAllowExpect::No))
})
// Seed entry point
- .chain(tcx.entry_fn(()).and_then(|(def_id, _)| def_id.as_local()))
+ .chain(
+ tcx.entry_fn(())
+ .and_then(|(def_id, _)| def_id.as_local().map(|id| (id, ComesFromAllowExpect::No))),
+ )
.collect::<Vec<_>>();
let crate_items = tcx.hir_crate_items(());
@@ -707,7 +769,7 @@ impl<'tcx> DeadVisitor<'tcx> {
if self.live_symbols.contains(&field.did.expect_local()) {
return ShouldWarnAboutField::No;
}
- let field_type = self.tcx.type_of(field.did).subst_identity();
+ let field_type = self.tcx.type_of(field.did).instantiate_identity();
if field_type.is_phantom_data() {
return ShouldWarnAboutField::No;
}
@@ -861,7 +923,7 @@ impl<'tcx> DeadVisitor<'tcx> {
| DefKind::Fn
| DefKind::Static(_)
| DefKind::Const
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::Enum
| DefKind::Union
| DefKind::ForeignTy => self.warn_dead_code(def_id, "used"),
@@ -878,13 +940,11 @@ impl<'tcx> DeadVisitor<'tcx> {
return true;
};
- self.live_symbols.contains(&def_id)
- || has_allow_dead_code_or_lang_attr(self.tcx, def_id)
- || name.as_str().starts_with('_')
+ self.live_symbols.contains(&def_id) || name.as_str().starts_with('_')
}
}
-fn check_mod_deathness(tcx: TyCtxt<'_>, module: LocalDefId) {
+fn check_mod_deathness(tcx: TyCtxt<'_>, module: LocalModDefId) {
let (live_symbols, ignored_derived_traits) = tcx.live_symbols_and_ignored_derived_traits(());
let mut visitor = DeadVisitor { tcx, live_symbols, ignored_derived_traits };
@@ -909,7 +969,7 @@ fn check_mod_deathness(tcx: TyCtxt<'_>, module: LocalDefId) {
if !live_symbols.contains(&item.owner_id.def_id) {
let parent = tcx.local_parent(item.owner_id.def_id);
- if parent != module && !live_symbols.contains(&parent) {
+ if parent != module.to_local_def_id() && !live_symbols.contains(&parent) {
// We already have diagnosed something.
continue;
}
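A hedged sketch of the `#[expect(dead_code)]` behaviour the long comment above describes: the expected item itself is still reported dead (so the expectation is fulfilled), while items it references are kept live. `#[expect]` was still behind `lint_reasons` at this point, so this is nightly-only:

    #![feature(lint_reasons)]

    #[expect(dead_code)]
    fn unused_entry() {
        // `helper` is reachable from an allowed/expected item, so it is kept
        // live and gets no dead_code warning of its own.
        helper();
    }

    fn helper() {}

    fn main() {}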
diff --git a/compiler/rustc_passes/src/entry.rs b/compiler/rustc_passes/src/entry.rs
index ffd8f77b7..4f71704b8 100644
--- a/compiler/rustc_passes/src/entry.rs
+++ b/compiler/rustc_passes/src/entry.rs
@@ -31,7 +31,7 @@ struct EntryContext<'tcx> {
}
fn entry_fn(tcx: TyCtxt<'_>, (): ()) -> Option<(DefId, EntryFnType)> {
- let any_exe = tcx.sess.crate_types().iter().any(|ty| *ty == CrateType::Executable);
+ let any_exe = tcx.crate_types().iter().any(|ty| *ty == CrateType::Executable);
if !any_exe {
// No need to find a main function.
return None;
@@ -187,12 +187,6 @@ fn sigpipe(tcx: TyCtxt<'_>, def_id: DefId) -> u8 {
fn no_main_err(tcx: TyCtxt<'_>, visitor: &EntryContext<'_>) {
let sp = tcx.def_span(CRATE_DEF_ID);
- if tcx.sess.parse_sess.reached_eof.load(rustc_data_structures::sync::Ordering::Relaxed) {
- // There's an unclosed brace that made the parser reach `Eof`, we shouldn't complain about
- // the missing `fn main()` then as it might have been hidden inside an unclosed block.
- tcx.sess.delay_span_bug(sp, "`main` not found, but expected unclosed brace error");
- return;
- }
// There is no main function.
let mut has_filename = true;
diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs
index 3fe7feb9d..683717344 100644
--- a/compiler/rustc_passes/src/errors.rs
+++ b/compiler/rustc_passes/src/errors.rs
@@ -267,6 +267,25 @@ pub struct DocInlineOnlyUse {
pub item_span: Option<Span>,
}
+#[derive(LintDiagnostic)]
+#[diag(passes_doc_masked_only_extern_crate)]
+#[note]
+pub struct DocMaskedOnlyExternCrate {
+ #[label]
+ pub attr_span: Span,
+ #[label(passes_not_an_extern_crate_label)]
+ pub item_span: Option<Span>,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(passes_doc_masked_not_extern_crate_self)]
+pub struct DocMaskedNotExternCrateSelf {
+ #[label]
+ pub attr_span: Span,
+ #[label(passes_extern_crate_self_label)]
+ pub item_span: Option<Span>,
+}
+
#[derive(Diagnostic)]
#[diag(passes_doc_attr_not_crate_level)]
pub struct DocAttrNotCrateLevel<'a> {
@@ -618,6 +637,38 @@ pub struct LinkOrdinal {
}
#[derive(Diagnostic)]
+#[diag(passes_confusables)]
+pub struct Confusables {
+ #[primary_span]
+ pub attr_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_empty_confusables)]
+pub(crate) struct EmptyConfusables {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_incorrect_meta_item, code = "E0539")]
+pub(crate) struct IncorrectMetaItem {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub suggestion: IncorrectMetaItemSuggestion,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(passes_incorrect_meta_item_suggestion, applicability = "maybe-incorrect")]
+pub(crate) struct IncorrectMetaItemSuggestion {
+ #[suggestion_part(code = "\"")]
+ pub lo: Span,
+ #[suggestion_part(code = "\"")]
+ pub hi: Span,
+}
+
+#[derive(Diagnostic)]
#[diag(passes_stability_promotable)]
pub struct StabilityPromotable {
#[primary_span]
@@ -639,6 +690,10 @@ pub enum MacroExport {
#[diag(passes_macro_export)]
Normal,
+ #[diag(passes_macro_export_on_decl_macro)]
+ #[note]
+ OnDeclMacro,
+
#[diag(passes_invalid_macro_export_arguments)]
UnknownItem { name: Symbol },
diff --git a/compiler/rustc_passes/src/hir_id_validator.rs b/compiler/rustc_passes/src/hir_id_validator.rs
index 363e17436..f825363ae 100644
--- a/compiler/rustc_passes/src/hir_id_validator.rs
+++ b/compiler/rustc_passes/src/hir_id_validator.rs
@@ -89,9 +89,8 @@ impl<'a, 'hir> HirIdValidator<'a, 'hir> {
self.error(|| {
format!(
- "ItemLocalIds not assigned densely in {}. \
- Max ItemLocalId = {}, missing IDs = {:#?}; seen IDs = {:#?}",
- pretty_owner, max, missing_items, seen_items
+ "ItemLocalIds not assigned densely in {pretty_owner}. \
+ Max ItemLocalId = {max}, missing IDs = {missing_items:#?}; seen IDs = {seen_items:#?}"
)
});
}
diff --git a/compiler/rustc_passes/src/hir_stats.rs b/compiler/rustc_passes/src/hir_stats.rs
index 6c748147a..5aa8aef6a 100644
--- a/compiler/rustc_passes/src/hir_stats.rs
+++ b/compiler/rustc_passes/src/hir_stats.rs
@@ -126,12 +126,12 @@ impl<'k> StatCollector<'k> {
let total_size = nodes.iter().map(|(_, node)| node.stats.count * node.stats.size).sum();
- eprintln!("{} {}", prefix, title);
+ eprintln!("{prefix} {title}");
eprintln!(
"{} {:<18}{:>18}{:>14}{:>14}",
prefix, "Name", "Accumulated Size", "Count", "Item Size"
);
- eprintln!("{} ----------------------------------------------------------------", prefix);
+ eprintln!("{prefix} ----------------------------------------------------------------");
let percent = |m, n| (m * 100) as f64 / n as f64;
@@ -163,9 +163,9 @@ impl<'k> StatCollector<'k> {
}
}
}
- eprintln!("{} ----------------------------------------------------------------", prefix);
+ eprintln!("{prefix} ----------------------------------------------------------------");
eprintln!("{} {:<18}{:>10}", prefix, "Total", to_readable_str(total_size));
- eprintln!("{}", prefix);
+ eprintln!("{prefix}");
}
}
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
index 098107f8f..a7a8af864 100644
--- a/compiler/rustc_passes/src/layout_test.rs
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -16,7 +16,7 @@ pub fn test_layout(tcx: TyCtxt<'_>) {
for id in tcx.hir().items() {
if matches!(
tcx.def_kind(id.owner_id),
- DefKind::TyAlias | DefKind::Enum | DefKind::Struct | DefKind::Union
+ DefKind::TyAlias { .. } | DefKind::Enum | DefKind::Struct | DefKind::Union
) {
for attr in tcx.get_attrs(id.owner_id, sym::rustc_layout) {
dump_layout_of(tcx, id.owner_id.def_id, attr);
@@ -27,9 +27,8 @@ pub fn test_layout(tcx: TyCtxt<'_>) {
}
fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
- let tcx = tcx;
let param_env = tcx.param_env(item_def_id);
- let ty = tcx.type_of(item_def_id).subst_identity();
+ let ty = tcx.type_of(item_def_id).instantiate_identity();
match tcx.layout_of(param_env.and(ty)) {
Ok(ty_layout) => {
// Check out the `#[rustc_layout(..)]` attribute to tell what to dump.
diff --git a/compiler/rustc_passes/src/liveness.rs b/compiler/rustc_passes/src/liveness.rs
index 803ca05b2..20e996eae 100644
--- a/compiler/rustc_passes/src/liveness.rs
+++ b/compiler/rustc_passes/src/liveness.rs
@@ -605,7 +605,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
for var_idx in 0..self.ir.var_kinds.len() {
let var = Variable::from(var_idx);
if test(var) {
- write!(wr, " {:?}", var)?;
+ write!(wr, " {var:?}")?;
}
}
Ok(())
@@ -747,7 +747,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
let ty = self.typeck_results.node_type(hir_id);
match ty.kind() {
- ty::Closure(_def_id, substs) => match substs.as_closure().kind() {
+ ty::Closure(_def_id, args) => match args.as_closure().kind() {
ty::ClosureKind::Fn => {}
ty::ClosureKind::FnMut => {}
ty::ClosureKind::FnOnce => return succ,
@@ -1061,7 +1061,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
self.propagate_through_expr(&l, ln)
}
- hir::ExprKind::Index(ref l, ref r) | hir::ExprKind::Binary(_, ref l, ref r) => {
+ hir::ExprKind::Index(ref l, ref r, _) | hir::ExprKind::Binary(_, ref l, ref r) => {
let r_succ = self.propagate_through_expr(&r, succ);
self.propagate_through_expr(&l, r_succ)
}
@@ -1105,7 +1105,6 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
}
// Then do a second pass for inputs
- let mut succ = succ;
for (op, _op_sp) in asm.operands.iter().rev() {
match op {
hir::InlineAsmOperand::In { expr, .. } => {
@@ -1683,12 +1682,16 @@ impl<'tcx> Liveness<'_, 'tcx> {
opt_body: Option<&hir::Body<'_>>,
) -> Vec<errors::UnusedVariableStringInterp> {
let mut suggs = Vec::new();
- let Some(opt_body) = opt_body else { return suggs; };
+ let Some(opt_body) = opt_body else {
+ return suggs;
+ };
let mut visitor = CollectLitsVisitor { lit_exprs: vec![] };
intravisit::walk_body(&mut visitor, opt_body);
for lit_expr in visitor.lit_exprs {
let hir::ExprKind::Lit(litx) = &lit_expr.kind else { continue };
- let rustc_ast::LitKind::Str(syb, _) = litx.node else{ continue; };
+ let rustc_ast::LitKind::Str(syb, _) = litx.node else {
+ continue;
+ };
let name_str: &str = syb.as_str();
let name_pa = format!("{{{name}}}");
if name_str.contains(&name_pa) {
diff --git a/compiler/rustc_passes/src/loops.rs b/compiler/rustc_passes/src/loops.rs
index 7c64df6a5..0aaf85086 100644
--- a/compiler/rustc_passes/src/loops.rs
+++ b/compiler/rustc_passes/src/loops.rs
@@ -1,7 +1,7 @@
use Context::*;
use rustc_hir as hir;
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::LocalModDefId;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{Destination, Movability, Node};
use rustc_middle::hir::map::Map;
@@ -34,7 +34,7 @@ struct CheckLoopVisitor<'a, 'hir> {
cx: Context,
}
-fn check_mod_loops(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+fn check_mod_loops(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
tcx.hir().visit_item_likes_in_module(
module_def_id,
&mut CheckLoopVisitor { sess: &tcx.sess, hir_map: tcx.hir(), cx: Normal },
diff --git a/compiler/rustc_passes/src/naked_functions.rs b/compiler/rustc_passes/src/naked_functions.rs
index 769b38900..7f36c59ad 100644
--- a/compiler/rustc_passes/src/naked_functions.rs
+++ b/compiler/rustc_passes/src/naked_functions.rs
@@ -3,7 +3,7 @@
use rustc_ast::InlineAsmOptions;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::{LocalDefId, LocalModDefId};
use rustc_hir::intravisit::Visitor;
use rustc_hir::{ExprKind, InlineAsmOperand, StmtKind};
use rustc_middle::query::Providers;
@@ -23,7 +23,7 @@ pub(crate) fn provide(providers: &mut Providers) {
*providers = Providers { check_mod_naked_functions, ..*providers };
}
-fn check_mod_naked_functions(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+fn check_mod_naked_functions(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
let items = tcx.hir_module_items(module_def_id);
for def_id in items.definitions() {
if !matches!(tcx.def_kind(def_id), DefKind::Fn | DefKind::AssocFn) {
diff --git a/compiler/rustc_passes/src/reachable.rs b/compiler/rustc_passes/src/reachable.rs
index 160528e40..e62833b35 100644
--- a/compiler/rustc_passes/src/reachable.rs
+++ b/compiler/rustc_passes/src/reachable.rs
@@ -98,15 +98,11 @@ impl<'tcx> Visitor<'tcx> for ReachableContext<'tcx> {
self.worklist.push(def_id);
} else {
match res {
- // If this path leads to a constant, then we need to
- // recurse into the constant to continue finding
- // items that are reachable.
- Res::Def(DefKind::Const | DefKind::AssocConst, _) => {
+ // Reachable constants and reachable statics can have their contents inlined
+ // into other crates. Mark them as reachable and recurse into their body.
+ Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::Static(_), _) => {
self.worklist.push(def_id);
}
-
- // If this wasn't a static, then the destination is
- // surely reachable.
_ => {
self.reachable_symbols.insert(def_id);
}
@@ -236,7 +232,7 @@ impl<'tcx> ReachableContext<'tcx> {
// Reachable constants will be inlined into other crates
// unconditionally, so we need to make sure that their
// contents are also reachable.
- hir::ItemKind::Const(_, init) | hir::ItemKind::Static(_, _, init) => {
+ hir::ItemKind::Const(_, _, init) | hir::ItemKind::Static(_, _, init) => {
self.visit_nested_body(init);
}
@@ -364,10 +360,10 @@ fn has_custom_linkage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
fn reachable_set(tcx: TyCtxt<'_>, (): ()) -> LocalDefIdSet {
let effective_visibilities = &tcx.effective_visibilities(());
- let any_library =
- tcx.sess.crate_types().iter().any(|ty| {
- *ty == CrateType::Rlib || *ty == CrateType::Dylib || *ty == CrateType::ProcMacro
- });
+ let any_library = tcx
+ .crate_types()
+ .iter()
+ .any(|ty| *ty == CrateType::Rlib || *ty == CrateType::Dylib || *ty == CrateType::ProcMacro);
let mut reachable_context = ReachableContext {
tcx,
maybe_typeck_results: None,
diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs
index b81b7ad60..9c265e8ec 100644
--- a/compiler/rustc_passes/src/stability.rs
+++ b/compiler/rustc_passes/src/stability.rs
@@ -9,7 +9,7 @@ use rustc_attr::{
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
-use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
+use rustc_hir::def_id::{LocalDefId, LocalModDefId, CRATE_DEF_ID};
use rustc_hir::hir_id::CRATE_HIR_ID;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{FieldDef, Item, ItemKind, TraitRef, Ty, TyKind, Variant};
@@ -115,7 +115,7 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> {
let attrs = self.tcx.hir().attrs(self.tcx.hir().local_def_id_to_hir_id(def_id));
debug!("annotate(id = {:?}, attrs = {:?})", def_id, attrs);
- let depr = attr::find_deprecation(&self.tcx.sess, attrs);
+ let depr = attr::find_deprecation(self.tcx.sess, self.tcx.features(), attrs);
let mut is_deprecated = false;
if let Some((depr, span)) = &depr {
is_deprecated = true;
@@ -682,7 +682,7 @@ fn stability_index(tcx: TyCtxt<'_>, (): ()) -> Index {
/// Cross-references the feature names of unstable APIs with enabled
/// features and possibly prints errors.
-fn check_mod_unstable_api_usage(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+fn check_mod_unstable_api_usage(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
tcx.hir().visit_item_likes_in_module(module_def_id, &mut Checker { tcx });
}
@@ -732,13 +732,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> {
// For implementations of traits, check the stability of each item
// individually as it's possible to have a stable trait with unstable
// items.
- hir::ItemKind::Impl(hir::Impl {
- of_trait: Some(ref t),
- self_ty,
- items,
- constness,
- ..
- }) => {
+ hir::ItemKind::Impl(hir::Impl { of_trait: Some(ref t), self_ty, items, .. }) => {
let features = self.tcx.features();
if features.staged_api {
let attrs = self.tcx.hir().attrs(item.hir_id());
@@ -769,7 +763,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> {
// `#![feature(const_trait_impl)]` is unstable, so any impl declared stable
// needs to have an error emitted.
if features.const_trait_impl
- && *constness == hir::Constness::Const
+ && self.tcx.is_const_trait_impl_raw(item.owner_id.to_def_id())
&& const_stab.is_some_and(|(stab, _)| stab.is_const_stable())
{
self.tcx.sess.emit_err(errors::TraitImplConstStable { span: item.span });
@@ -856,7 +850,9 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> {
/// See issue #94972 for details on why this is a special case
fn is_unstable_reexport(tcx: TyCtxt<'_>, id: hir::HirId) -> bool {
// Get the LocalDefId so we can lookup the item to check the kind.
- let Some(owner) = id.as_owner() else { return false; };
+ let Some(owner) = id.as_owner() else {
+ return false;
+ };
let def_id = owner.def_id;
let Some(stab) = tcx.stability().local_stability(def_id) else {
diff --git a/compiler/rustc_passes/src/weak_lang_items.rs b/compiler/rustc_passes/src/weak_lang_items.rs
index fc6372cf9..75e071f1f 100644
--- a/compiler/rustc_passes/src/weak_lang_items.rs
+++ b/compiler/rustc_passes/src/weak_lang_items.rs
@@ -43,7 +43,7 @@ pub fn check_crate(tcx: TyCtxt<'_>, items: &mut lang_items::LanguageItems) {
fn verify(tcx: TyCtxt<'_>, items: &lang_items::LanguageItems) {
// We only need to check for the presence of weak lang items if we're
// emitting something that's not an rlib.
- let needs_check = tcx.sess.crate_types().iter().any(|kind| match *kind {
+ let needs_check = tcx.crate_types().iter().any(|kind| match *kind {
CrateType::Dylib
| CrateType::ProcMacro
| CrateType::Cdylib
diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs
index 4fcee9396..0eb344ba6 100644
--- a/compiler/rustc_privacy/src/lib.rs
+++ b/compiler/rustc_privacy/src/lib.rs
@@ -20,7 +20,7 @@ use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
-use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId, CRATE_DEF_ID};
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{AssocItemKind, ForeignItemKind, HirIdSet, ItemId, Node, PatKind};
use rustc_middle::bug;
@@ -28,7 +28,7 @@ use rustc_middle::hir::nested_filter;
use rustc_middle::middle::privacy::{EffectiveVisibilities, EffectiveVisibility, Level};
use rustc_middle::query::Providers;
use rustc_middle::span_bug;
-use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{self, Const, GenericParamDefKind};
use rustc_middle::ty::{TraitRef, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
use rustc_session::lint;
@@ -129,37 +129,25 @@ where
V: DefIdVisitor<'tcx> + ?Sized,
{
fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow<V::BreakTy> {
- let TraitRef { def_id, substs, .. } = trait_ref;
+ let TraitRef { def_id, args, .. } = trait_ref;
self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref.print_only_trait_path())?;
- if V::SHALLOW { ControlFlow::Continue(()) } else { substs.visit_with(self) }
+ if V::SHALLOW { ControlFlow::Continue(()) } else { args.visit_with(self) }
}
fn visit_projection_ty(&mut self, projection: ty::AliasTy<'tcx>) -> ControlFlow<V::BreakTy> {
let tcx = self.def_id_visitor.tcx();
- let (trait_ref, assoc_substs) = if tcx.def_kind(projection.def_id)
- != DefKind::ImplTraitPlaceholder
- {
- projection.trait_ref_and_own_substs(tcx)
- } else {
- // HACK(RPITIT): Remove this when RPITITs are lowered to regular assoc tys
- let def_id = tcx.impl_trait_in_trait_parent_fn(projection.def_id);
- let trait_generics = tcx.generics_of(def_id);
- (
- ty::TraitRef::new(tcx, def_id, projection.substs.truncate_to(tcx, trait_generics)),
- &projection.substs[trait_generics.count()..],
- )
- };
+ let (trait_ref, assoc_args) = projection.trait_ref_and_own_args(tcx);
self.visit_trait(trait_ref)?;
if V::SHALLOW {
ControlFlow::Continue(())
} else {
- assoc_substs.iter().try_for_each(|subst| subst.visit_with(self))
+ assoc_args.iter().try_for_each(|subst| subst.visit_with(self))
}
}
fn visit_clause(&mut self, clause: ty::Clause<'tcx>) -> ControlFlow<V::BreakTy> {
match clause.kind().skip_binder() {
- ty::ClauseKind::Trait(ty::TraitPredicate { trait_ref, constness: _, polarity: _ }) => {
+ ty::ClauseKind::Trait(ty::TraitPredicate { trait_ref, polarity: _ }) => {
self.visit_trait(trait_ref)
}
ty::ClauseKind::Projection(ty::ProjectionPredicate { projection_ty, term }) => {
@@ -190,7 +178,7 @@ where
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<V::BreakTy> {
let tcx = self.def_id_visitor.tcx();
- // InternalSubsts are not visited here because they are visited below
+ // GenericArgs are not visited here because they are visited below
// in `super_visit_with`.
match *ty.kind() {
ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), ..)
@@ -206,16 +194,16 @@ where
// Something like `fn() -> Priv {my_func}` is considered a private type even if
// `my_func` is public, so we need to visit signatures.
if let ty::FnDef(..) = ty.kind() {
- // FIXME: this should probably use `substs` from `FnDef`
- tcx.fn_sig(def_id).subst_identity().visit_with(self)?;
+ // FIXME: this should probably use `args` from `FnDef`
+ tcx.fn_sig(def_id).instantiate_identity().visit_with(self)?;
}
- // Inherent static methods don't have self type in substs.
+ // Inherent static methods don't have self type in args.
// Something like `fn() {my_method}` type of the method
// `impl Pub<Priv> { pub fn my_method() {} }` is considered a private type,
// so we need to visit the self type additionally.
if let Some(assoc_item) = tcx.opt_associated_item(def_id) {
if let Some(impl_def_id) = assoc_item.impl_container(tcx) {
- tcx.type_of(impl_def_id).subst_identity().visit_with(self)?;
+ tcx.type_of(impl_def_id).instantiate_identity().visit_with(self)?;
}
}
}
@@ -231,7 +219,7 @@ where
// free type aliases, but this isn't done yet.
return ControlFlow::Continue(());
}
- // This will also visit substs if necessary, so we don't need to recurse.
+ // This will also visit args if necessary, so we don't need to recurse.
return self.visit_projection_ty(proj);
}
ty::Alias(ty::Inherent, data) => {
@@ -250,11 +238,11 @@ where
&LazyDefPathStr { def_id: data.def_id, tcx },
)?;
- // This will also visit substs if necessary, so we don't need to recurse.
+ // This will also visit args if necessary, so we don't need to recurse.
return if V::SHALLOW {
ControlFlow::Continue(())
} else {
- data.substs.iter().try_for_each(|subst| subst.visit_with(self))
+ data.args.iter().try_for_each(|subst| subst.visit_with(self))
};
}
ty::Dynamic(predicates, ..) => {
@@ -265,10 +253,10 @@ where
ty::ExistentialPredicate::Trait(trait_ref) => trait_ref,
ty::ExistentialPredicate::Projection(proj) => proj.trait_ref(tcx),
ty::ExistentialPredicate::AutoTrait(def_id) => {
- ty::ExistentialTraitRef { def_id, substs: InternalSubsts::empty() }
+ ty::ExistentialTraitRef { def_id, args: GenericArgs::empty() }
}
};
- let ty::ExistentialTraitRef { def_id, substs: _ } = trait_ref;
+ let ty::ExistentialTraitRef { def_id, args: _ } = trait_ref;
self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref)?;
}
}
@@ -369,9 +357,9 @@ trait VisibilityLike: Sized {
effective_visibilities: &EffectiveVisibilities,
) -> Self {
let mut find = FindMin::<_, SHALLOW> { tcx, effective_visibilities, min: Self::MAX };
- find.visit(tcx.type_of(def_id).subst_identity());
+ find.visit(tcx.type_of(def_id).instantiate_identity());
if let Some(trait_ref) = tcx.impl_trait_ref(def_id) {
- find.visit_trait(trait_ref.subst_identity());
+ find.visit_trait(trait_ref.instantiate_identity());
}
find.min
}
@@ -394,8 +382,9 @@ impl VisibilityLike for EffectiveVisibility {
) -> Self {
let effective_vis =
find.effective_visibilities.effective_vis(def_id).copied().unwrap_or_else(|| {
- let private_vis =
- ty::Visibility::Restricted(find.tcx.parent_module_from_def_id(def_id));
+ let private_vis = ty::Visibility::Restricted(
+ find.tcx.parent_module_from_def_id(def_id).to_local_def_id(),
+ );
EffectiveVisibility::from_vis(private_vis)
});
@@ -424,7 +413,7 @@ struct EmbargoVisitor<'tcx> {
/// pub macro m() {
/// n::p::f()
/// }
- macro_reachable: FxHashSet<(LocalDefId, LocalDefId)>,
+ macro_reachable: FxHashSet<(LocalModDefId, LocalModDefId)>,
/// Preliminary pass for marking all underlying types of `impl Trait`s as reachable.
impl_trait_pass: bool,
/// Has something changed in the level map?
@@ -461,7 +450,9 @@ impl<'tcx> EmbargoVisitor<'tcx> {
max_vis: Option<ty::Visibility>,
level: Level,
) {
- let private_vis = ty::Visibility::Restricted(self.tcx.parent_module_from_def_id(def_id));
+ // FIXME(typed_def_id): Make `Visibility::Restricted` use a `LocalModDefId` by default.
+ let private_vis =
+ ty::Visibility::Restricted(self.tcx.parent_module_from_def_id(def_id).into());
if max_vis != Some(private_vis) {
self.changed |= self.effective_visibilities.update(
def_id,
@@ -520,6 +511,8 @@ impl<'tcx> EmbargoVisitor<'tcx> {
// The macro's parent doesn't correspond to a `mod`, return early (#63164, #65252).
return;
}
+ // FIXME(typed_def_id): Introduce checked constructors that check def_kind.
+ let macro_module_def_id = LocalModDefId::new_unchecked(macro_module_def_id);
if self.effective_visibilities.public_at_level(local_def_id).is_none() {
return;
@@ -531,10 +524,10 @@ impl<'tcx> EmbargoVisitor<'tcx> {
loop {
let changed_reachability =
self.update_macro_reachable(module_def_id, macro_module_def_id, macro_ev);
- if changed_reachability || module_def_id == CRATE_DEF_ID {
+ if changed_reachability || module_def_id == LocalModDefId::CRATE_DEF_ID {
break;
}
- module_def_id = self.tcx.local_parent(module_def_id);
+ module_def_id = LocalModDefId::new_unchecked(self.tcx.local_parent(module_def_id));
}
}
@@ -542,8 +535,8 @@ impl<'tcx> EmbargoVisitor<'tcx> {
/// module. Returns `true` if the level has changed.
fn update_macro_reachable(
&mut self,
- module_def_id: LocalDefId,
- defining_mod: LocalDefId,
+ module_def_id: LocalModDefId,
+ defining_mod: LocalModDefId,
macro_ev: EffectiveVisibility,
) -> bool {
if self.macro_reachable.insert((module_def_id, defining_mod)) {
@@ -556,8 +549,8 @@ impl<'tcx> EmbargoVisitor<'tcx> {
fn update_macro_reachable_mod(
&mut self,
- module_def_id: LocalDefId,
- defining_mod: LocalDefId,
+ module_def_id: LocalModDefId,
+ defining_mod: LocalModDefId,
macro_ev: EffectiveVisibility,
) {
let module = self.tcx.hir().get_module(module_def_id).0;
@@ -572,7 +565,7 @@ impl<'tcx> EmbargoVisitor<'tcx> {
macro_ev,
);
}
- for child in self.tcx.module_children_local(module_def_id) {
+ for child in self.tcx.module_children_local(module_def_id.to_local_def_id()) {
// FIXME: Use module children for the logic above too.
if !child.reexport_chain.is_empty()
&& child.vis.is_accessible_from(defining_mod, self.tcx)
@@ -589,13 +582,13 @@ impl<'tcx> EmbargoVisitor<'tcx> {
def_id: LocalDefId,
def_kind: DefKind,
vis: ty::Visibility,
- module: LocalDefId,
+ module: LocalModDefId,
macro_ev: EffectiveVisibility,
) {
self.update(def_id, macro_ev, Level::Reachable);
match def_kind {
// No type privacy, so can be directly marked as reachable.
- DefKind::Const | DefKind::Static(_) | DefKind::TraitAlias | DefKind::TyAlias => {
+ DefKind::Const | DefKind::Static(_) | DefKind::TraitAlias | DefKind::TyAlias { .. } => {
if vis.is_accessible_from(module, self.tcx) {
self.update(def_id, macro_ev, Level::Reachable);
}
@@ -620,7 +613,11 @@ impl<'tcx> EmbargoVisitor<'tcx> {
// the module, however may be reachable.
DefKind::Mod => {
if vis.is_accessible_from(module, self.tcx) {
- self.update_macro_reachable(def_id, module, macro_ev);
+ self.update_macro_reachable(
+ LocalModDefId::new_unchecked(def_id),
+ module,
+ macro_ev,
+ );
}
}
@@ -651,7 +648,6 @@ impl<'tcx> EmbargoVisitor<'tcx> {
| DefKind::ForeignTy
| DefKind::Fn
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::AssocFn
| DefKind::Trait
| DefKind::TyParam
@@ -740,7 +736,7 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
// Type inference is very smart sometimes. It can make an impl reachable even if some
// components of its type or trait are unreachable. E.g. methods of
// `impl ReachableTrait<UnreachableTy> for ReachableTy<UnreachableTy> { ... }`
- // can be usable from other crates (#57264). So we skip substs when calculating
+ // can be usable from other crates (#57264). So we skip args when calculating
// reachability and consider an impl reachable if its "shallow" type and trait are
// reachable.
//
@@ -836,13 +832,15 @@ impl ReachEverythingInTheInterfaceVisitor<'_, '_> {
GenericParamDefKind::Lifetime => {}
GenericParamDefKind::Type { has_default, .. } => {
if has_default {
- self.visit(self.ev.tcx.type_of(param.def_id).subst_identity());
+ self.visit(self.ev.tcx.type_of(param.def_id).instantiate_identity());
}
}
GenericParamDefKind::Const { has_default } => {
- self.visit(self.ev.tcx.type_of(param.def_id).subst_identity());
+ self.visit(self.ev.tcx.type_of(param.def_id).instantiate_identity());
if has_default {
- self.visit(self.ev.tcx.const_param_default(param.def_id).subst_identity());
+ self.visit(
+ self.ev.tcx.const_param_default(param.def_id).instantiate_identity(),
+ );
}
}
}
@@ -856,13 +854,13 @@ impl ReachEverythingInTheInterfaceVisitor<'_, '_> {
}
fn ty(&mut self) -> &mut Self {
- self.visit(self.ev.tcx.type_of(self.item_def_id).subst_identity());
+ self.visit(self.ev.tcx.type_of(self.item_def_id).instantiate_identity());
self
}
fn trait_ref(&mut self) -> &mut Self {
if let Some(trait_ref) = self.ev.tcx.impl_trait_ref(self.item_def_id) {
- self.visit_trait(trait_ref.subst_identity());
+ self.visit_trait(trait_ref.instantiate_identity());
}
self
}
@@ -903,7 +901,7 @@ fn vis_to_string<'tcx>(def_id: LocalDefId, vis: ty::Visibility, tcx: TyCtxt<'tcx
ty::Visibility::Restricted(restricted_id) => {
if restricted_id.is_top_level_module() {
"pub(crate)".to_string()
- } else if restricted_id == tcx.parent_module_from_def_id(def_id) {
+ } else if restricted_id == tcx.parent_module_from_def_id(def_id).to_local_def_id() {
"pub(self)".to_string()
} else {
format!("pub({})", tcx.item_name(restricted_id.to_def_id()))
@@ -1137,7 +1135,7 @@ impl<'tcx> TypePrivacyVisitor<'tcx> {
let typeck_results = self.typeck_results();
let result: ControlFlow<()> = try {
self.visit(typeck_results.node_type(id))?;
- self.visit(typeck_results.node_substs(id))?;
+ self.visit(typeck_results.node_args(id))?;
if let Some(adjustments) = typeck_results.adjustments().get(id) {
adjustments.iter().try_for_each(|adjustment| self.visit(adjustment.target))?;
}
@@ -1272,7 +1270,7 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
// Method calls have to be checked specially.
self.span = segment.ident.span;
if let Some(def_id) = self.typeck_results().type_dependent_def_id(expr.hir_id) {
- if self.visit(self.tcx.type_of(def_id).subst_identity()).is_break() {
+ if self.visit(self.tcx.type_of(def_id).instantiate_identity()).is_break() {
return;
}
} else {
@@ -1749,12 +1747,12 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> {
GenericParamDefKind::Lifetime => {}
GenericParamDefKind::Type { has_default, .. } => {
if has_default {
- self.visit(self.tcx.type_of(param.def_id).subst_identity());
+ self.visit(self.tcx.type_of(param.def_id).instantiate_identity());
}
}
// FIXME(generic_const_exprs): May want to look inside const here
GenericParamDefKind::Const { .. } => {
- self.visit(self.tcx.type_of(param.def_id).subst_identity());
+ self.visit(self.tcx.type_of(param.def_id).instantiate_identity());
}
}
}
@@ -1781,7 +1779,7 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> {
fn ty(&mut self) -> &mut Self {
self.in_primary_interface = true;
- self.visit(self.tcx.type_of(self.item_def_id).subst_identity());
+ self.visit(self.tcx.type_of(self.item_def_id).instantiate_identity());
self
}
@@ -1811,7 +1809,7 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> {
let vis_descr = match vis {
ty::Visibility::Public => "public",
ty::Visibility::Restricted(vis_def_id) => {
- if vis_def_id == self.tcx.parent_module(hir_id) {
+ if vis_def_id == self.tcx.parent_module(hir_id).to_local_def_id() {
"private"
} else if vis_def_id.is_top_level_module() {
"crate-private"
@@ -1952,7 +1950,7 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx, '_> {
let reexported_at_vis = effective_vis.at_level(Level::Reexported);
let reachable_at_vis = effective_vis.at_level(Level::Reachable);
- if reexported_at_vis != reachable_at_vis {
+ if reachable_at_vis.is_public() && reexported_at_vis != reachable_at_vis {
let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
let span = self.tcx.def_span(def_id.to_def_id());
self.tcx.emit_spanned_lint(
@@ -1984,10 +1982,6 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx, '_> {
AssocItemKind::Type => (self.tcx.defaultness(def_id).has_value(), true),
};
- if is_assoc_ty {
- self.check_unnameable(def_id, self.get(def_id));
- }
-
check.in_assoc_ty = is_assoc_ty;
check.generics().predicates();
if check_ty {
@@ -2007,8 +2001,8 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx, '_> {
let def_kind = tcx.def_kind(def_id);
match def_kind {
- DefKind::Const | DefKind::Static(_) | DefKind::Fn | DefKind::TyAlias => {
- if let DefKind::TyAlias = def_kind {
+ DefKind::Const | DefKind::Static(_) | DefKind::Fn | DefKind::TyAlias { .. } => {
+ if let DefKind::TyAlias { .. } = def_kind {
self.check_unnameable(def_id, effective_vis);
}
self.check(def_id, item_visibility, effective_vis).generics().predicates().ty();
@@ -2211,7 +2205,7 @@ fn local_visibility(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Visibility {
kind: hir::ItemKind::Use(_, hir::UseKind::ListStem)
| hir::ItemKind::OpaqueTy(..),
..
- }) => ty::Visibility::Restricted(tcx.parent_module(hir_id)),
+ }) => ty::Visibility::Restricted(tcx.parent_module(hir_id).to_local_def_id()),
// Visibilities of trait impl items are inherited from their traits
// and are not filled in resolve.
Node::ImplItem(impl_item) => {
@@ -2239,18 +2233,25 @@ fn local_visibility(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Visibility {
}
}
-fn check_mod_privacy(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+fn check_mod_privacy(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
// Check privacy of names not checked in previous compilation stages.
- let mut visitor =
- NamePrivacyVisitor { tcx, maybe_typeck_results: None, current_item: module_def_id };
+ let mut visitor = NamePrivacyVisitor {
+ tcx,
+ maybe_typeck_results: None,
+ current_item: module_def_id.to_local_def_id(),
+ };
let (module, span, hir_id) = tcx.hir().get_module(module_def_id);
intravisit::walk_mod(&mut visitor, module, hir_id);
// Check privacy of explicitly written types and traits as well as
// inferred types of expressions and patterns.
- let mut visitor =
- TypePrivacyVisitor { tcx, maybe_typeck_results: None, current_item: module_def_id, span };
+ let mut visitor = TypePrivacyVisitor {
+ tcx,
+ maybe_typeck_results: None,
+ current_item: module_def_id.to_local_def_id(),
+ span,
+ };
intravisit::walk_mod(&mut visitor, module, hir_id);
}
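The privacy visitors above return `ControlFlow<V::BreakTy>` and thread it through `try_for_each`. A small std-only sketch of that short-circuiting combination, with made-up names:

    use std::ops::ControlFlow;

    fn find_private(names: &[&str]) -> ControlFlow<String> {
        names.iter().try_for_each(|name| {
            if name.starts_with("priv_") {
                // Break short-circuits the whole iteration, like the visitors' BreakTy.
                ControlFlow::Break(format!("private item `{name}` reached"))
            } else {
                ControlFlow::Continue(())
            }
        })
    }

    fn main() {
        assert!(find_private(&["pub_a", "priv_b"]).is_break());
        assert!(find_private(&["pub_a", "pub_b"]).is_continue());
    }
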
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
index 4cf0f1305..53005ede8 100644
--- a/compiler/rustc_query_impl/src/lib.rs
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -11,6 +11,7 @@
#![allow(rustc::potential_query_instability, unused_parens)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate rustc_middle;
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
index 12a3f2ac8..def6ac280 100644
--- a/compiler/rustc_query_impl/src/plumbing.rs
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -183,7 +183,7 @@ pub(super) fn encode_all_query_results<'tcx>(
encoder: &mut CacheEncoder<'_, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex,
) {
- for encode in super::ENCODE_QUERY_RESULTS.iter().copied().filter_map(|e| e) {
+ for encode in super::ENCODE_QUERY_RESULTS.iter().copied().flatten() {
encode(tcx, encoder, query_result_index);
}
}
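The plumbing.rs hunk above replaces `filter_map(|e| e)` with the equivalent `flatten()` over an iterator of `Option`s; a standalone illustration of the same idiom:

    fn main() {
        let encoders: [Option<fn() -> u32>; 3] = [Some(|| 1), None, Some(|| 2)];
        // `flatten()` over `Option` items yields only the `Some` values,
        // replacing the older `filter_map(|e| e)` spelling.
        let total: u32 = encoders.iter().copied().flatten().map(|f| f()).sum();
        assert_eq!(total, 3);
    }
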
diff --git a/compiler/rustc_query_system/Cargo.toml b/compiler/rustc_query_system/Cargo.toml
index e02cf38b6..584355df8 100644
--- a/compiler/rustc_query_system/Cargo.toml
+++ b/compiler/rustc_query_system/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
[lib]
[dependencies]
-parking_lot = "0.11"
+parking_lot = "0.12"
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
diff --git a/compiler/rustc_query_system/messages.ftl b/compiler/rustc_query_system/messages.ftl
index 49b423d1a..d5fed8fe1 100644
--- a/compiler/rustc_query_system/messages.ftl
+++ b/compiler/rustc_query_system/messages.ftl
@@ -1,4 +1,5 @@
query_system_cycle = cycle detected when {$stack_bottom}
+ .note = see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
query_system_cycle_recursive_trait_alias = trait aliases cannot be recursive
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index c9e80a6d9..30422ea11 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -1,6 +1,5 @@
-use parking_lot::Mutex;
use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@@ -88,13 +87,13 @@ pub struct DepGraphData<K: DepKind> {
colors: DepNodeColorMap,
- processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>,
+ processed_side_effects: Lock<FxHashSet<DepNodeIndex>>,
/// When we load, there may be `.o` files, cached MIR, or other such
/// things available to us. If we find that they are not dirty, we
/// load the path to the file storing those work-products here into
/// this map. We can later look for and extract that data.
- previous_work_products: FxIndexMap<WorkProductId, WorkProduct>,
+ previous_work_products: WorkProductMap,
dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
@@ -117,7 +116,7 @@ impl<K: DepKind> DepGraph<K> {
pub fn new(
profiler: &SelfProfilerRef,
prev_graph: SerializedDepGraph<K>,
- prev_work_products: FxIndexMap<WorkProductId, WorkProduct>,
+ prev_work_products: WorkProductMap,
encoder: FileEncoder,
record_graph: bool,
record_stats: bool,
@@ -557,7 +556,7 @@ impl<K: DepKind> DepGraph<K> {
result,
prev_index,
hash_result,
- |value| format!("{:?}", value),
+ |value| format!("{value:?}"),
);
#[cfg(debug_assertions)]
@@ -689,7 +688,7 @@ impl<K: DepKind> DepGraph<K> {
/// Access the map of work-products created during the cached run. Only
/// used during saving of the dep-graph.
- pub fn previous_work_products(&self) -> &FxIndexMap<WorkProductId, WorkProduct> {
+ pub fn previous_work_products(&self) -> &WorkProductMap {
&self.data.as_ref().unwrap().previous_work_products
}
@@ -1052,6 +1051,8 @@ pub struct WorkProduct {
pub saved_files: UnordMap<String, String>,
}
+pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
+
// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
struct EdgeIndex {}
@@ -1433,7 +1434,7 @@ pub(crate) fn print_markframe_trace<K: DepKind>(
let mut current = frame;
while let Some(frame) = current {
let node = data.previous.index_to_node(frame.index);
- eprintln!("#{i} {:?}", node);
+ eprintln!("#{i} {node:?}");
current = frame.parent;
i += 1;
}
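Several graph.rs hunks above only switch `format!`/`eprintln!` calls to inlined format arguments (stable since Rust 1.58); the two spellings are equivalent:

    fn main() {
        let node = ("DepNode", 42);
        assert_eq!(format!("{:?}", node), format!("{node:?}"));
    }
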
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index 40e713198..0fd9e35d6 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -7,7 +7,7 @@ mod serialized;
pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId};
pub use graph::{
hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef,
- WorkProduct,
+ WorkProduct, WorkProductMap,
};
pub use query::DepGraphQuery;
pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
diff --git a/compiler/rustc_query_system/src/error.rs b/compiler/rustc_query_system/src/error.rs
index cf2f04c74..e49e78cc7 100644
--- a/compiler/rustc_query_system/src/error.rs
+++ b/compiler/rustc_query_system/src/error.rs
@@ -57,6 +57,8 @@ pub struct Cycle {
pub alias: Option<Alias>,
#[subdiagnostic]
pub cycle_usage: Option<CycleUsage>,
+ #[note]
+ pub note_span: (),
}
#[derive(Diagnostic)]
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 9a09f516e..4ba9d53a9 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -1,9 +1,7 @@
use crate::dep_graph::DepNodeIndex;
use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sharded;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::sync::Lock;
use rustc_index::{Idx, IndexVec};
use std::fmt::Debug;
@@ -37,10 +35,7 @@ impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelecto
}
pub struct DefaultCache<K, V> {
- #[cfg(parallel_compiler)]
cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
- #[cfg(not(parallel_compiler))]
- cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
}
impl<K, V> Default for DefaultCache<K, V> {
@@ -60,10 +55,7 @@ where
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
let key_hash = sharded::make_hash(key);
- #[cfg(parallel_compiler)]
let lock = self.cache.get_shard_by_hash(key_hash).lock();
- #[cfg(not(parallel_compiler))]
- let lock = self.cache.lock();
let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
if let Some((_, value)) = result { Some(*value) } else { None }
@@ -71,29 +63,16 @@ where
#[inline]
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
- #[cfg(parallel_compiler)]
let mut lock = self.cache.get_shard_by_value(&key).lock();
- #[cfg(not(parallel_compiler))]
- let mut lock = self.cache.lock();
// We may be overwriting another value. This is all right, since the dep-graph
// will check that the fingerprint matches.
lock.insert(key, (value, index));
}
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
- #[cfg(parallel_compiler)]
- {
- let shards = self.cache.lock_shards();
- for shard in shards.iter() {
- for (k, v) in shard.iter() {
- f(k, &v.0, v.1);
- }
- }
- }
- #[cfg(not(parallel_compiler))]
- {
- let map = self.cache.lock();
- for (k, v) in map.iter() {
+ let shards = self.cache.lock_shards();
+ for shard in shards.iter() {
+ for (k, v) in shard.iter() {
f(k, &v.0, v.1);
}
}
@@ -151,10 +130,7 @@ impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
}
pub struct VecCache<K: Idx, V> {
- #[cfg(parallel_compiler)]
cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
- #[cfg(not(parallel_compiler))]
- cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
}
impl<K: Idx, V> Default for VecCache<K, V> {
@@ -173,38 +149,20 @@ where
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
- #[cfg(parallel_compiler)]
let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
- #[cfg(not(parallel_compiler))]
- let lock = self.cache.lock();
if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
}
#[inline]
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
- #[cfg(parallel_compiler)]
let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
- #[cfg(not(parallel_compiler))]
- let mut lock = self.cache.lock();
lock.insert(key, (value, index));
}
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
- #[cfg(parallel_compiler)]
- {
- let shards = self.cache.lock_shards();
- for shard in shards.iter() {
- for (k, v) in shard.iter_enumerated() {
- if let Some(v) = v {
- f(&k, &v.0, v.1);
- }
- }
- }
- }
- #[cfg(not(parallel_compiler))]
- {
- let map = self.cache.lock();
- for (k, v) in map.iter_enumerated() {
+ let shards = self.cache.lock_shards();
+ for shard in shards.iter() {
+ for (k, v) in shard.iter_enumerated() {
if let Some(v) = v {
f(&k, &v.0, v.1);
}
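The caches.rs hunks drop the `cfg(parallel_compiler)` split and always keep the query cache in a sharded map. A rough, std-only sketch of hash-based sharding (the real `Sharded` type in rustc_data_structures differs in detail):

    use std::collections::hash_map::DefaultHasher;
    use std::collections::HashMap;
    use std::hash::{Hash, Hasher};
    use std::sync::Mutex;

    const SHARDS: usize = 4;

    struct ShardedMap<K, V> {
        shards: [Mutex<HashMap<K, V>>; SHARDS],
    }

    impl<K: Hash + Eq, V> ShardedMap<K, V> {
        fn new() -> Self {
            ShardedMap { shards: std::array::from_fn(|_| Mutex::new(HashMap::new())) }
        }
        // Pick the shard by hashing the key, so different keys rarely contend
        // on the same lock.
        fn shard_for(&self, key: &K) -> &Mutex<HashMap<K, V>> {
            let mut hasher = DefaultHasher::new();
            key.hash(&mut hasher);
            &self.shards[(hasher.finish() as usize) % SHARDS]
        }
        fn insert(&self, key: K, value: V) {
            self.shard_for(&key).lock().unwrap().insert(key, value);
        }
        fn get(&self, key: &K) -> Option<V>
        where
            V: Clone,
        {
            self.shard_for(key).lock().unwrap().get(key).cloned()
        }
    }

    fn main() {
        let cache = ShardedMap::new();
        cache.insert("query_key", 7u32);
        assert_eq!(cache.get(&"query_key"), Some(7));
    }
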
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index f45f7ca5d..1b1248924 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -13,6 +13,7 @@ use rustc_session::Session;
use rustc_span::Span;
use std::hash::Hash;
+use std::io::Write;
use std::num::NonZeroU64;
#[cfg(parallel_compiler)]
@@ -20,12 +21,11 @@ use {
parking_lot::{Condvar, Mutex},
rayon_core,
rustc_data_structures::fx::FxHashSet,
- rustc_data_structures::sync::Lock,
- rustc_data_structures::sync::Lrc,
rustc_data_structures::{defer, jobserver},
rustc_span::DUMMY_SP,
std::iter,
std::process,
+ std::sync::Arc,
};
/// Represents a span and a query key.
@@ -190,7 +190,7 @@ struct QueryWaiter<D: DepKind> {
query: Option<QueryJobId>,
condvar: Condvar,
span: Span,
- cycle: Lock<Option<CycleError<D>>>,
+ cycle: Mutex<Option<CycleError<D>>>,
}
#[cfg(parallel_compiler)]
@@ -204,20 +204,20 @@ impl<D: DepKind> QueryWaiter<D> {
#[cfg(parallel_compiler)]
struct QueryLatchInfo<D: DepKind> {
complete: bool,
- waiters: Vec<Lrc<QueryWaiter<D>>>,
+ waiters: Vec<Arc<QueryWaiter<D>>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch<D: DepKind> {
- info: Lrc<Mutex<QueryLatchInfo<D>>>,
+ info: Arc<Mutex<QueryLatchInfo<D>>>,
}
#[cfg(parallel_compiler)]
impl<D: DepKind> QueryLatch<D> {
fn new() -> Self {
QueryLatch {
- info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
+ info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
}
}
@@ -228,11 +228,11 @@ impl<D: DepKind> QueryLatch<D> {
span: Span,
) -> Result<(), CycleError<D>> {
let waiter =
- Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
+ Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter);
// FIXME: Get rid of this lock. We have ownership of the QueryWaiter
- // although another thread may still have a Lrc reference so we cannot
- // use Lrc::get_mut
+ // although another thread may still have an Arc reference so we cannot
+ // use Arc::get_mut
let mut cycle = waiter.cycle.lock();
match cycle.take() {
None => Ok(()),
@@ -241,7 +241,7 @@ impl<D: DepKind> QueryLatch<D> {
}
/// Awaits the caller on this latch by blocking the current thread.
- fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
+ fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<D>>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
@@ -275,7 +275,7 @@ impl<D: DepKind> QueryLatch<D> {
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
- fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
+ fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<D>> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
@@ -427,7 +427,7 @@ where
fn remove_cycle<D: DepKind>(
query_map: &QueryMap<D>,
jobs: &mut Vec<QueryJobId>,
- wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
+ wakelist: &mut Vec<Arc<QueryWaiter<D>>>,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
@@ -592,7 +592,10 @@ pub(crate) fn report_cycle<'a, D: DepKind>(
});
}
- let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
+ let alias = if stack
+ .iter()
+ .all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias { .. })))
+ {
Some(crate::error::Alias::Ty)
} else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
Some(crate::error::Alias::Trait)
@@ -607,6 +610,7 @@ pub(crate) fn report_cycle<'a, D: DepKind>(
alias,
cycle_usage: cycle_usage,
stack_count,
+ note_span: (),
};
cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
@@ -617,30 +621,50 @@ pub fn print_query_stack<Qcx: QueryContext>(
mut current_query: Option<QueryJobId>,
handler: &Handler,
num_frames: Option<usize>,
+ mut file: Option<std::fs::File>,
) -> usize {
// Be careful relying on global state here: this code is called from
// a panic hook, which means that the global `Handler` may be in a weird
// state if it was responsible for triggering the panic.
- let mut i = 0;
+ let mut count_printed = 0;
+ let mut count_total = 0;
let query_map = qcx.try_collect_active_jobs();
+ if let Some(ref mut file) = file {
+ let _ = writeln!(file, "\n\nquery stack during panic:");
+ }
while let Some(query) = current_query {
- if Some(i) == num_frames {
- break;
- }
let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
break;
};
- let mut diag = Diagnostic::new(
- Level::FailureNote,
- format!("#{} [{:?}] {}", i, query_info.query.dep_kind, query_info.query.description),
- );
- diag.span = query_info.job.span.into();
- handler.force_print_diagnostic(diag);
+ if Some(count_printed) < num_frames || num_frames.is_none() {
+ // Only print to stderr as many stack frames as `num_frames` when present.
+ let mut diag = Diagnostic::new(
+ Level::FailureNote,
+ format!(
+ "#{} [{:?}] {}",
+ count_printed, query_info.query.dep_kind, query_info.query.description
+ ),
+ );
+ diag.span = query_info.job.span.into();
+ handler.force_print_diagnostic(diag);
+ count_printed += 1;
+ }
+
+ if let Some(ref mut file) = file {
+ let _ = writeln!(
+ file,
+ "#{} [{:?}] {}",
+ count_total, query_info.query.dep_kind, query_info.query.description
+ );
+ }
current_query = query_info.job.parent;
- i += 1;
+ count_total += 1;
}
- i
+ if let Some(ref mut file) = file {
+ let _ = writeln!(file, "end of query stack");
+ }
+ count_printed
}
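`print_query_stack` above now mirrors every frame into an optional file while still limiting what goes to stderr. A minimal std-only sketch of the "write if a file was provided, ignore I/O errors" pattern — the helper name and frame strings are made up for illustration:

    use std::io::Write;

    fn log_frames(frames: &[&str], num_frames: Option<usize>, mut file: Option<std::fs::File>) -> usize {
        let mut printed = 0;
        if let Some(ref mut file) = file {
            let _ = writeln!(file, "\n\nquery stack during panic:");
        }
        for (total, frame) in frames.iter().enumerate() {
            // Only print to stderr as many frames as requested; the file gets everything.
            if num_frames.map_or(true, |limit| printed < limit) {
                eprintln!("#{printed} {frame}");
                printed += 1;
            }
            if let Some(ref mut file) = file {
                // I/O errors are deliberately ignored: this runs from a panic hook.
                let _ = writeln!(file, "#{total} {frame}");
            }
        }
        if let Some(ref mut file) = file {
            let _ = writeln!(file, "end of query stack");
        }
        printed
    }

    fn main() {
        let printed = log_frames(&["[typeck] type-checking `main`", "[mir_built] building MIR"], Some(1), None);
        assert_eq!(printed, 1);
    }
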
diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs
index e6ceedddf..127bec22c 100644
--- a/compiler/rustc_resolve/src/build_reduced_graph.rs
+++ b/compiler/rustc_resolve/src/build_reduced_graph.rs
@@ -41,6 +41,7 @@ impl<'a, Id: Into<DefId>> ToNameBinding<'a>
arenas.alloc_name_binding(NameBindingData {
kind: NameBindingKind::Module(self.0),
ambiguity: None,
+ warn_ambiguity: false,
vis: self.1.to_def_id(),
span: self.2,
expansion: self.3,
@@ -53,6 +54,7 @@ impl<'a, Id: Into<DefId>> ToNameBinding<'a> for (Res, ty::Visibility<Id>, Span,
arenas.alloc_name_binding(NameBindingData {
kind: NameBindingKind::Res(self.0),
ambiguity: None,
+ warn_ambiguity: false,
vis: self.1.to_def_id(),
span: self.2,
expansion: self.3,
@@ -69,7 +71,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
{
let binding = def.to_name_binding(self.arenas);
let key = self.new_disambiguated_key(ident, ns);
- if let Err(old_binding) = self.try_define(parent, key, binding) {
+ if let Err(old_binding) = self.try_define(parent, key, binding, false) {
self.report_conflict(parent, ident, ns, old_binding, binding);
}
}
@@ -169,7 +171,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
return macro_data.clone();
}
- let load_macro_untracked = self.cstore().load_macro_untracked(def_id, &self.tcx.sess);
+ let load_macro_untracked = self.cstore().load_macro_untracked(def_id, self.tcx);
let (ext, macro_rules) = match load_macro_untracked {
LoadedMacro::MacroDef(item, edition) => (
Lrc::new(self.compile_macro(&item, edition).0),
@@ -276,7 +278,7 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
};
match self.r.resolve_path(
&segments,
- Some(TypeNS),
+ None,
parent_scope,
finalize.then(|| Finalize::new(id, path.span)),
None,
@@ -698,7 +700,10 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
// These items live in the type namespace.
ItemKind::TyAlias(..) => {
- let res = Res::Def(DefKind::TyAlias, def_id);
+ let res = Res::Def(
+ DefKind::TyAlias { lazy: self.r.tcx.features().lazy_type_alias },
+ def_id,
+ );
self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
}
@@ -946,10 +951,9 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
DefKind::Struct
| DefKind::Union
| DefKind::Variant
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::OpaqueTy
- | DefKind::ImplTraitPlaceholder
| DefKind::TraitAlias
| DefKind::AssocTy,
_,
@@ -1000,7 +1004,7 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
allow_shadowing: bool,
) {
if self.r.macro_use_prelude.insert(name, binding).is_some() && !allow_shadowing {
- let msg = format!("`{}` is already in scope", name);
+ let msg = format!("`{name}` is already in scope");
let note =
"macro-expanded `#[macro_use]`s may not shadow existing macros (see RFC 1560)";
self.r.tcx.sess.struct_span_err(span, msg).note(note).emit();
diff --git a/compiler/rustc_resolve/src/check_unused.rs b/compiler/rustc_resolve/src/check_unused.rs
index dc35c8b17..7dbbd4c34 100644
--- a/compiler/rustc_resolve/src/check_unused.rs
+++ b/compiler/rustc_resolve/src/check_unused.rs
@@ -362,7 +362,7 @@ impl Resolver<'_, '_> {
let mut span_snippets = spans
.iter()
.filter_map(|s| match tcx.sess.source_map().span_to_snippet(*s) {
- Ok(s) => Some(format!("`{}`", s)),
+ Ok(s) => Some(format!("`{s}`")),
_ => None,
})
.collect::<Vec<String>>();
@@ -440,7 +440,7 @@ impl Resolver<'_, '_> {
// If we are not in Rust 2018 edition, then we don't make any further
// suggestions.
- if !tcx.sess.rust_2018() {
+ if !tcx.sess.at_least_rust_2018() {
continue;
}
diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs
index d3dcdfa42..cd1a9b934 100644
--- a/compiler/rustc_resolve/src/diagnostics.rs
+++ b/compiler/rustc_resolve/src/diagnostics.rs
@@ -5,10 +5,8 @@ use rustc_ast::{self as ast, Crate, ItemKind, ModKind, NodeId, Path, CRATE_NODE_
use rustc_ast::{MetaItemKind, NestedMetaItem};
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::{
- pluralize, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, MultiSpan,
-};
-use rustc_errors::{struct_span_err, SuggestionStyle};
+use rustc_errors::{pluralize, report_ambiguity_error, struct_span_err, SuggestionStyle};
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, MultiSpan};
use rustc_feature::BUILTIN_ATTRIBUTES;
use rustc_hir::def::Namespace::{self, *};
use rustc_hir::def::{self, CtorKind, CtorOf, DefKind, NonMacroAttrKind, PerNS};
@@ -17,8 +15,9 @@ use rustc_hir::PrimTy;
use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::builtin::ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE;
+use rustc_session::lint::builtin::AMBIGUOUS_GLOB_IMPORTS;
use rustc_session::lint::builtin::MACRO_EXPANDED_MACRO_EXPORTS_ACCESSED_BY_ABSOLUTE_PATHS;
-use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_session::lint::{AmbiguityErrorDiag, BuiltinLintDiagnostics};
use rustc_session::Session;
use rustc_span::edit_distance::find_best_match_for_name;
use rustc_span::edition::Edition;
@@ -135,7 +134,23 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
for ambiguity_error in &self.ambiguity_errors {
- self.report_ambiguity_error(ambiguity_error);
+ let diag = self.ambiguity_diagnostics(ambiguity_error);
+ if ambiguity_error.warning {
+ let NameBindingKind::Import { import, .. } = ambiguity_error.b1.0.kind else {
+ unreachable!()
+ };
+ self.lint_buffer.buffer_lint_with_diagnostic(
+ AMBIGUOUS_GLOB_IMPORTS,
+ import.root_id,
+ ambiguity_error.ident.span,
+ diag.msg.to_string(),
+ BuiltinLintDiagnostics::AmbiguousGlobImports { diag },
+ );
+ } else {
+ let mut err = struct_span_err!(self.tcx.sess, diag.span, E0659, "{}", &diag.msg);
+ report_ambiguity_error(&mut err, diag);
+ err.emit();
+ }
}
let mut reported_spans = FxHashSet::default();
@@ -228,7 +243,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
(TypeNS, _) => "type",
};
- let msg = format!("the name `{}` is defined multiple times", name);
+ let msg = format!("the name `{name}` is defined multiple times");
let mut err = match (old_binding.is_extern_crate(), new_binding.is_extern_crate()) {
(true, true) => struct_span_err!(self.tcx.sess, span, E0259, "{}", msg),
@@ -250,11 +265,11 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
container
));
- err.span_label(span, format!("`{}` re{} here", name, new_participle));
+ err.span_label(span, format!("`{name}` re{new_participle} here"));
if !old_binding.span.is_dummy() && old_binding.span != span {
err.span_label(
self.tcx.sess.source_map().guess_head_span(old_binding.span),
- format!("previous {} of the {} `{}` here", old_noun, old_kind, name),
+ format!("previous {old_noun} of the {old_kind} `{name}` here"),
);
}
@@ -343,15 +358,15 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
binding_span: Span,
) {
let suggested_name = if name.as_str().chars().next().unwrap().is_uppercase() {
- format!("Other{}", name)
+ format!("Other{name}")
} else {
- format!("other_{}", name)
+ format!("other_{name}")
};
let mut suggestion = None;
match import.kind {
ImportKind::Single { type_ns_only: true, .. } => {
- suggestion = Some(format!("self as {}", suggested_name))
+ suggestion = Some(format!("self as {suggested_name}"))
}
ImportKind::Single { source, .. } => {
if let Some(pos) =
@@ -587,11 +602,11 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let sugg_msg = "try using a local generic parameter instead";
let name = self.tcx.item_name(def_id);
let (span, snippet) = if span.is_empty() {
- let snippet = format!("<{}>", name);
+ let snippet = format!("<{name}>");
(span, snippet)
} else {
let span = sm.span_through_char(span, '<').shrink_to_hi();
- let snippet = format!("{}, ", name);
+ let snippet = format!("{name}, ");
(span, snippet)
};
// Suggest the modification to the user
@@ -652,7 +667,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
name,
);
for sp in target_sp {
- err.span_label(sp, format!("pattern doesn't bind `{}`", name));
+ err.span_label(sp, format!("pattern doesn't bind `{name}`"));
}
for sp in origin_sp {
err.span_label(sp, "variable not in all patterns");
@@ -679,8 +694,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if import_suggestions.is_empty() {
let help_msg = format!(
"if you meant to match on a variant or a `const` item, consider \
- making the path in the pattern qualified: `path::to::ModOrType::{}`",
- name,
+ making the path in the pattern qualified: `path::to::ModOrType::{name}`",
);
err.span_help(span, help_msg);
}
@@ -938,8 +952,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let mut err = self.tcx.sess.struct_span_err_with_code(
span,
format!(
- "item `{}` is an associated {}, which doesn't match its trait `{}`",
- name, kind, trait_path,
+ "item `{name}` is an associated {kind}, which doesn't match its trait `{trait_path}`",
),
code,
);
@@ -1203,7 +1216,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if filter_fn(res) {
// create the path
let mut segms = path_segments.clone();
- if lookup_ident.span.rust_2018() {
+ if lookup_ident.span.at_least_rust_2018() {
// crate-local absolute paths start with `crate::` in edition 2018
// FIXME: may also be stabilized for Rust 2015 (Issues #45477, #44660)
segms.insert(0, ast::PathSegment::from_ident(crate_name));
@@ -1268,7 +1281,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
path_segments.push(ast::PathSegment::from_ident(ident));
let is_extern_crate_that_also_appears_in_prelude =
- name_binding.is_extern_crate() && lookup_ident.span.rust_2018();
+ name_binding.is_extern_crate() && lookup_ident.span.at_least_rust_2018();
if !is_extern_crate_that_also_appears_in_prelude {
// add the module to the lookup
@@ -1315,7 +1328,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
&filter_fn,
);
- if lookup_ident.span.rust_2018() {
+ if lookup_ident.span.at_least_rust_2018() {
let extern_prelude_names = self.extern_prelude.clone();
for (ident, _) in extern_prelude_names.into_iter() {
if ident.span.from_expansion() {
@@ -1395,7 +1408,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let head_span = source_map.guess_head_span(span);
err.subdiagnostic(ConsiderAddingADerive {
span: head_span.shrink_to_lo(),
- suggestion: format!("#[derive(Default)]\n")
+ suggestion: "#[derive(Default)]\n".to_string(),
});
}
for ns in [Namespace::MacroNS, Namespace::TypeNS, Namespace::ValueNS] {
@@ -1412,10 +1425,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
"a function-like macro".to_string()
}
Res::Def(DefKind::Macro(MacroKind::Attr), _) | Res::NonMacroAttr(..) => {
- format!("an attribute: `#[{}]`", ident)
+ format!("an attribute: `#[{ident}]`")
}
Res::Def(DefKind::Macro(MacroKind::Derive), _) => {
- format!("a derive macro: `#[derive({})]`", ident)
+ format!("a derive macro: `#[derive({ident})]`")
}
Res::ToolMod => {
// Don't confuse the user with tool modules.
@@ -1436,7 +1449,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if !import.span.is_dummy() {
err.span_note(
import.span,
- format!("`{}` is imported here, but it is {}", ident, desc),
+ format!("`{ident}` is imported here, but it is {desc}"),
);
// Silence the 'unused import' warning we might get,
// since this diagnostic already covers that import.
@@ -1444,7 +1457,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
return;
}
}
- err.note(format!("`{}` is in scope, but it is {}", ident, desc));
+ err.note(format!("`{ident}` is in scope, but it is {desc}"));
return;
}
}
@@ -1540,20 +1553,15 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
}
- fn report_ambiguity_error(&self, ambiguity_error: &AmbiguityError<'_>) {
- let AmbiguityError { kind, ident, b1, b2, misc1, misc2 } = *ambiguity_error;
+ fn ambiguity_diagnostics(&self, ambiguity_error: &AmbiguityError<'_>) -> AmbiguityErrorDiag {
+ let AmbiguityError { kind, ident, b1, b2, misc1, misc2, .. } = *ambiguity_error;
let (b1, b2, misc1, misc2, swapped) = if b2.span.is_dummy() && !b1.span.is_dummy() {
// We have to print the span-less alternative first, otherwise formatting looks bad.
(b2, b1, misc2, misc1, true)
} else {
(b1, b2, misc1, misc2, false)
};
-
- let mut err = struct_span_err!(self.tcx.sess, ident.span, E0659, "`{ident}` is ambiguous");
- err.span_label(ident.span, "ambiguous name");
- err.note(format!("ambiguous because of {}", kind.descr()));
-
- let mut could_refer_to = |b: NameBinding<'_>, misc: AmbiguityErrorMisc, also: &str| {
+ let could_refer_to = |b: NameBinding<'_>, misc: AmbiguityErrorMisc, also: &str| {
let what = self.binding_description(b, ident, misc == AmbiguityErrorMisc::FromPrelude);
let note_msg = format!("`{ident}` could{also} refer to {what}");
@@ -1568,7 +1576,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
"consider adding an explicit import of `{ident}` to disambiguate"
))
}
- if b.is_extern_crate() && ident.span.rust_2018() {
+ if b.is_extern_crate() && ident.span.at_least_rust_2018() {
help_msgs.push(format!("use `::{ident}` to refer to this {thing} unambiguously"))
}
match misc {
@@ -1579,16 +1587,35 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
AmbiguityErrorMisc::FromPrelude | AmbiguityErrorMisc::None => {}
}
- err.span_note(b.span, note_msg);
- for (i, help_msg) in help_msgs.iter().enumerate() {
- let or = if i == 0 { "" } else { "or " };
- err.help(format!("{}{}", or, help_msg));
- }
+ (
+ b.span,
+ note_msg,
+ help_msgs
+ .iter()
+ .enumerate()
+ .map(|(i, help_msg)| {
+ let or = if i == 0 { "" } else { "or " };
+ format!("{or}{help_msg}")
+ })
+ .collect::<Vec<_>>(),
+ )
};
-
- could_refer_to(b1, misc1, "");
- could_refer_to(b2, misc2, " also");
- err.emit();
+ let (b1_span, b1_note_msg, b1_help_msgs) = could_refer_to(b1, misc1, "");
+ let (b2_span, b2_note_msg, b2_help_msgs) = could_refer_to(b2, misc2, " also");
+
+ AmbiguityErrorDiag {
+ msg: format!("`{ident}` is ambiguous"),
+ span: ident.span,
+ label_span: ident.span,
+ label_msg: "ambiguous name".to_string(),
+ note_msg: format!("ambiguous because of {}", kind.descr()),
+ b1_span,
+ b1_note_msg,
+ b1_help_msgs,
+ b2_span,
+ b2_note_msg,
+ b2_help_msgs,
+ }
}
/// If the binding refers to a tuple struct constructor with fields,
@@ -1626,7 +1653,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let descr = get_descr(binding);
let mut err =
struct_span_err!(self.tcx.sess, ident.span, E0603, "{} `{}` is private", descr, ident);
- err.span_label(ident.span, format!("private {}", descr));
+ err.span_label(ident.span, format!("private {descr}"));
if let Some((this_res, outer_ident)) = outermost_res {
let import_suggestions = self.lookup_import_candidates(
@@ -1718,7 +1745,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if next_binding.is_none() && let Some(span) = non_exhaustive {
note_span.push_span_label(
span,
- format!("cannot be constructed because it is `#[non_exhaustive]`"),
+ "cannot be constructed because it is `#[non_exhaustive]`",
);
}
err.span_note(note_span, msg);
@@ -1811,7 +1838,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
_ => format!("`{parent}`"),
};
- let mut msg = format!("could not find `{}` in {}", ident, parent);
+ let mut msg = format!("could not find `{ident}` in {parent}");
if ns == TypeNS || ns == ValueNS {
let ns_to_try = if ns == TypeNS { ValueNS } else { TypeNS };
let binding = if let Some(module) = module {
@@ -1926,12 +1953,12 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let suggestion = match_span.map(|span| {
(
vec![(span, String::from(""))],
- format!("`{}` is defined here, but is not a type", ident),
+ format!("`{ident}` is defined here, but is not a type"),
Applicability::MaybeIncorrect,
)
});
- (format!("use of undeclared type `{}`", ident), suggestion)
+ (format!("use of undeclared type `{ident}`"), suggestion)
} else {
let mut suggestion = None;
if ident.name == sym::alloc {
@@ -1953,7 +1980,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
},
)
});
- (format!("use of undeclared crate or module `{}`", ident), suggestion)
+ (format!("use of undeclared crate or module `{ident}`"), suggestion)
}
}
@@ -1973,7 +2000,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if fst.ident.name == kw::PathRoot && !snd.ident.is_path_segment_keyword() => {}
// `ident::...` on 2018.
(Some(fst), _)
- if fst.ident.span.rust_2018() && !fst.ident.is_path_segment_keyword() =>
+ if fst.ident.span.at_least_rust_2018() && !fst.ident.is_path_segment_keyword() =>
{
// Insert a placeholder that's later replaced by `self`/`super`/etc.
path.insert(0, Segment::from_ident(Ident::empty()));
@@ -2137,16 +2164,16 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let module_name = crate_module.kind.name().unwrap();
let import_snippet = match import.kind {
ImportKind::Single { source, target, .. } if source != target => {
- format!("{} as {}", source, target)
+ format!("{source} as {target}")
}
- _ => format!("{}", ident),
+ _ => format!("{ident}"),
};
let mut corrections: Vec<(Span, String)> = Vec::new();
if !import.is_nested() {
// Assume this is the easy case of `use issue_59764::foo::makro;` and just remove
// intermediate segments.
- corrections.push((import.span, format!("{}::{}", module_name, import_snippet)));
+ corrections.push((import.span, format!("{module_name}::{import_snippet}")));
} else {
// Find the binding span (and any trailing commas and spaces).
// ie. `use a::b::{c, d, e};`
@@ -2213,11 +2240,11 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
start_point,
if has_nested {
// In this case, `start_snippet` must equal '{'.
- format!("{}{}, ", start_snippet, import_snippet)
+ format!("{start_snippet}{import_snippet}, ")
} else {
// In this case, add a `{`, then the moved import, then whatever
// was there before.
- format!("{{{}, {}", import_snippet, start_snippet)
+ format!("{{{import_snippet}, {start_snippet}")
},
));
@@ -2634,9 +2661,9 @@ fn show_candidates(
"item"
};
let plural_descr =
- if descr.ends_with('s') { format!("{}es", descr) } else { format!("{}s", descr) };
+ if descr.ends_with('s') { format!("{descr}es") } else { format!("{descr}s") };
- let mut msg = format!("{}these {} exist but are inaccessible", prefix, plural_descr);
+ let mut msg = format!("{prefix}these {plural_descr} exist but are inaccessible");
let mut has_colon = false;
let mut spans = Vec::new();
@@ -2657,7 +2684,7 @@ fn show_candidates(
let mut multi_span = MultiSpan::from_spans(spans.iter().map(|(_, sp)| *sp).collect());
for (name, span) in spans {
- multi_span.push_span_label(span, format!("`{}`: not accessible", name));
+ multi_span.push_span_label(span, format!("`{name}`: not accessible"));
}
for note in inaccessible_path_strings.iter().flat_map(|cand| cand.3.as_ref()) {
diff --git a/compiler/rustc_resolve/src/effective_visibilities.rs b/compiler/rustc_resolve/src/effective_visibilities.rs
index eb210532f..46f5df5ca 100644
--- a/compiler/rustc_resolve/src/effective_visibilities.rs
+++ b/compiler/rustc_resolve/src/effective_visibilities.rs
@@ -128,11 +128,14 @@ impl<'r, 'a, 'tcx> EffectiveVisibilitiesVisitor<'r, 'a, 'tcx> {
// If the binding is ambiguous, put the root ambiguity binding and all reexports
// leading to it into the table. They are used by the `ambiguous_glob_reexports`
// lint. For all bindings added to the table this way `is_ambiguity` returns true.
+ let is_ambiguity =
+ |binding: NameBinding<'a>, warn: bool| binding.ambiguity.is_some() && !warn;
let mut parent_id = ParentId::Def(module_id);
+ let mut warn_ambiguity = binding.warn_ambiguity;
while let NameBindingKind::Import { binding: nested_binding, .. } = binding.kind {
self.update_import(binding, parent_id);
- if binding.ambiguity.is_some() {
+ if is_ambiguity(binding, warn_ambiguity) {
// Stop at the root ambiguity, further bindings in the chain should not
// be reexported because the root ambiguity blocks any access to them.
// (Those further bindings are most likely not ambiguities themselves.)
@@ -141,9 +144,9 @@ impl<'r, 'a, 'tcx> EffectiveVisibilitiesVisitor<'r, 'a, 'tcx> {
parent_id = ParentId::Import(binding);
binding = nested_binding;
+ warn_ambiguity |= nested_binding.warn_ambiguity;
}
-
- if binding.ambiguity.is_none()
+ if !is_ambiguity(binding, warn_ambiguity)
&& let Some(def_id) = binding.res().opt_def_id().and_then(|id| id.as_local()) {
self.update_def(def_id, binding.vis.expect_local(), parent_id);
}
diff --git a/compiler/rustc_resolve/src/ident.rs b/compiler/rustc_resolve/src/ident.rs
index 520fab1f0..3bd9cea27 100644
--- a/compiler/rustc_resolve/src/ident.rs
+++ b/compiler/rustc_resolve/src/ident.rs
@@ -677,6 +677,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
ident: orig_ident,
b1: innermost_binding,
b2: binding,
+ warning: false,
misc1: misc(innermost_flags),
misc2: misc(flags),
});
@@ -905,6 +906,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
ident,
b1: binding,
b2: shadowed_glob,
+ warning: false,
misc1: AmbiguityErrorMisc::None,
misc2: AmbiguityErrorMisc::None,
});
@@ -1417,13 +1419,13 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
));
continue;
}
- if name == kw::PathRoot && ident.span.rust_2018() {
+ if name == kw::PathRoot && ident.span.at_least_rust_2018() {
module = Some(ModuleOrUniformRoot::ExternPrelude);
continue;
}
if name == kw::PathRoot
&& ident.span.is_rust_2015()
- && self.tcx.sess.rust_2018()
+ && self.tcx.sess.at_least_rust_2018()
{
// `::a::b` from 2015 macro on 2018 global edition
module = Some(ModuleOrUniformRoot::CrateRootAndExternPrelude);
@@ -1443,12 +1445,12 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let name_str = if name == kw::PathRoot {
"crate root".to_string()
} else {
- format!("`{}`", name)
+ format!("`{name}`")
};
let label = if segment_idx == 1 && path[0].ident.name == kw::PathRoot {
- format!("global paths cannot start with {}", name_str)
+ format!("global paths cannot start with {name_str}")
} else {
- format!("{} in paths can only be used in start position", name_str)
+ format!("{name_str} in paths can only be used in start position")
};
(label, None)
});
diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs
index d37fe783b..a175d9f6c 100644
--- a/compiler/rustc_resolve/src/imports.rs
+++ b/compiler/rustc_resolve/src/imports.rs
@@ -284,6 +284,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
self.arenas.alloc_name_binding(NameBindingData {
kind: NameBindingKind::Import { binding, import, used: Cell::new(false) },
ambiguity: None,
+ warn_ambiguity: false,
span: import.span,
vis,
expansion: import.parent_scope.expansion,
@@ -291,16 +292,18 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
/// Define the name or return the existing binding if there is a collision.
+ /// `update` indicates if the definition is a redefinition of an existing binding.
pub(crate) fn try_define(
&mut self,
module: Module<'a>,
key: BindingKey,
binding: NameBinding<'a>,
+ warn_ambiguity: bool,
) -> Result<(), NameBinding<'a>> {
let res = binding.res();
self.check_reserved_macro_name(key.ident, res);
self.set_binding_parent_module(binding, module);
- self.update_resolution(module, key, |this, resolution| {
+ self.update_resolution(module, key, warn_ambiguity, |this, resolution| {
if let Some(old_binding) = resolution.binding {
if res == Res::Err && old_binding.res() != Res::Err {
// Do not override real bindings with `Res::Err`s from error recovery.
@@ -308,15 +311,42 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
match (old_binding.is_glob_import(), binding.is_glob_import()) {
(true, true) => {
- if res != old_binding.res() {
- resolution.binding = Some(this.ambiguity(
- AmbiguityKind::GlobVsGlob,
- old_binding,
- binding,
- ));
+ // FIXME: remove `!binding.is_ambiguity()` once the warning-only ambiguity handling is deleted.
+ if !binding.is_ambiguity()
+ && let NameBindingKind::Import { import: old_import, .. } = old_binding.kind
+ && let NameBindingKind::Import { import, .. } = binding.kind
+ && old_import == import {
+ // We should replace `old_binding` with `binding` regardless of
+ // whether they have the same resolution when both are imported
+ // from the same glob-import statement.
+ // However, we currently keep `Some(old_binding)` for backward
+ // compatibility.
+ // This case can be removed once `Undetermined` is prepared for
+ // glob-imports.
+ } else if res != old_binding.res() {
+ let binding = if warn_ambiguity {
+ this.warn_ambiguity(
+ AmbiguityKind::GlobVsGlob,
+ old_binding,
+ binding,
+ )
+ } else {
+ this.ambiguity(
+ AmbiguityKind::GlobVsGlob,
+ old_binding,
+ binding,
+ )
+ };
+ resolution.binding = Some(binding);
} else if !old_binding.vis.is_at_least(binding.vis, this.tcx) {
// We are glob-importing the same item but with greater visibility.
resolution.binding = Some(binding);
+ } else if binding.is_ambiguity() {
+ resolution.binding =
+ Some(self.arenas.alloc_name_binding(NameBindingData {
+ warn_ambiguity: true,
+ ..(*binding).clone()
+ }));
}
}
(old_glob @ true, false) | (old_glob @ false, true) => {
@@ -374,29 +404,52 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
})
}
+ fn warn_ambiguity(
+ &self,
+ kind: AmbiguityKind,
+ primary_binding: NameBinding<'a>,
+ secondary_binding: NameBinding<'a>,
+ ) -> NameBinding<'a> {
+ self.arenas.alloc_name_binding(NameBindingData {
+ ambiguity: Some((secondary_binding, kind)),
+ warn_ambiguity: true,
+ ..(*primary_binding).clone()
+ })
+ }
+
// Use `f` to mutate the resolution of the name in the module.
// If the resolution becomes a success, define it in the module's glob importers.
- fn update_resolution<T, F>(&mut self, module: Module<'a>, key: BindingKey, f: F) -> T
+ fn update_resolution<T, F>(
+ &mut self,
+ module: Module<'a>,
+ key: BindingKey,
+ warn_ambiguity: bool,
+ f: F,
+ ) -> T
where
F: FnOnce(&mut Resolver<'a, 'tcx>, &mut NameResolution<'a>) -> T,
{
// Ensure that `resolution` isn't borrowed when defining in the module's glob importers,
// during which the resolution might end up getting re-defined via a glob cycle.
- let (binding, t) = {
+ let (binding, t, warn_ambiguity) = {
let resolution = &mut *self.resolution(module, key).borrow_mut();
let old_binding = resolution.binding();
let t = f(self, resolution);
- if old_binding.is_none() && let Some(binding) = resolution.binding() {
- (binding, t)
+ if let Some(binding) = resolution.binding() && old_binding != Some(binding) {
+ (binding, t, warn_ambiguity || old_binding.is_some())
} else {
return t;
}
};
- // Define `binding` in `module`s glob importers.
- for import in module.glob_importers.borrow_mut().iter() {
+ let Ok(glob_importers) = module.glob_importers.try_borrow_mut() else {
+ return t;
+ };
+
+ // Define or update `binding` in `module`s glob importers.
+ for import in glob_importers.iter() {
let mut ident = key.ident;
let scope = match ident.span.reverse_glob_adjust(module.expansion, import.span) {
Some(Some(def)) => self.expn_def_scope(def),
@@ -406,7 +459,12 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if self.is_accessible_from(binding.vis, scope) {
let imported_binding = self.import(binding, *import);
let key = BindingKey { ident, ..key };
- let _ = self.try_define(import.parent_scope.module, key, imported_binding);
+ let _ = self.try_define(
+ import.parent_scope.module,
+ key,
+ imported_binding,
+ warn_ambiguity,
+ );
}
}
@@ -425,7 +483,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let dummy_binding = self.import(dummy_binding, import);
self.per_ns(|this, ns| {
let key = BindingKey::new(target, ns);
- let _ = this.try_define(import.parent_scope.module, key, dummy_binding);
+ let _ = this.try_define(import.parent_scope.module, key, dummy_binding, false);
});
self.record_use(target, dummy_binding, false);
} else if import.imported_module.get().is_none() {
@@ -475,15 +533,15 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let indeterminate_imports = mem::take(&mut self.indeterminate_imports);
for (is_indeterminate, import) in determined_imports
- .into_iter()
+ .iter()
.map(|i| (false, i))
- .chain(indeterminate_imports.into_iter().map(|i| (true, i)))
+ .chain(indeterminate_imports.iter().map(|i| (true, i)))
{
- let unresolved_import_error = self.finalize_import(import);
+ let unresolved_import_error = self.finalize_import(*import);
// If this import is unresolved then create a dummy import
// resolution for it so that later resolve stages won't complain.
- self.import_dummy_binding(import, is_indeterminate);
+ self.import_dummy_binding(*import, is_indeterminate);
if let Some(err) = unresolved_import_error {
if let ImportKind::Single { source, ref source_bindings, .. } = import.kind {
@@ -505,27 +563,34 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
errors = vec![];
}
if seen_spans.insert(err.span) {
- errors.push((import, err));
+ errors.push((*import, err));
prev_root_id = import.root_id;
}
- } else if is_indeterminate {
- let path = import_path_to_string(
- &import.module_path.iter().map(|seg| seg.ident).collect::<Vec<_>>(),
- &import.kind,
- import.span,
- );
- let err = UnresolvedImportError {
- span: import.span,
- label: None,
- note: None,
- suggestion: None,
- candidates: None,
- };
- // FIXME: there should be a better way of doing this than
- // formatting this as a string then checking for `::`
- if path.contains("::") {
- errors.push((import, err))
- }
+ }
+ }
+
+ if !errors.is_empty() {
+ self.throw_unresolved_import_error(errors);
+ return;
+ }
+
+ for import in &indeterminate_imports {
+ let path = import_path_to_string(
+ &import.module_path.iter().map(|seg| seg.ident).collect::<Vec<_>>(),
+ &import.kind,
+ import.span,
+ );
+ let err = UnresolvedImportError {
+ span: import.span,
+ label: None,
+ note: None,
+ suggestion: None,
+ candidates: None,
+ };
+ // FIXME: there should be a better way of doing this than
+ // formatting this as a string then checking for `::`
+ if path.contains("::") {
+ errors.push((*import, err))
}
}
@@ -700,7 +765,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
Segment::names_to_string(&import.module_path),
module_to_string(import.parent_scope.module).unwrap_or_else(|| "???".to_string()),
);
-
let module = if let Some(module) = import.imported_module.get() {
module
} else {
@@ -773,7 +837,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
.emit();
}
let key = BindingKey::new(target, ns);
- this.update_resolution(parent, key, |_, resolution| {
+ this.update_resolution(parent, key, false, |_, resolution| {
resolution.single_imports.remove(&import);
});
}
@@ -989,14 +1053,23 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
initial_binding.res()
});
let res = binding.res();
+ let has_ambiguity_error = this
+ .ambiguity_errors
+ .iter()
+ .filter(|error| !error.warning)
+ .next()
+ .is_some();
+ if res == Res::Err || has_ambiguity_error {
+ this.tcx
+ .sess
+ .delay_span_bug(import.span, "some error happened for an import");
+ return;
+ }
if let Ok(initial_res) = initial_res {
- if res != initial_res && this.ambiguity_errors.is_empty() {
+ if res != initial_res {
span_bug!(import.span, "inconsistent resolution for an import");
}
- } else if res != Res::Err
- && this.ambiguity_errors.is_empty()
- && this.privacy_errors.is_empty()
- {
+ } else if this.privacy_errors.is_empty() {
this.tcx
.sess
.create_err(CannotDetermineImportResolution { span: import.span })
@@ -1087,18 +1160,18 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
ModuleOrUniformRoot::Module(module) => {
let module_str = module_to_string(module);
if let Some(module_str) = module_str {
- format!("no `{}` in `{}`", ident, module_str)
+ format!("no `{ident}` in `{module_str}`")
} else {
- format!("no `{}` in the root", ident)
+ format!("no `{ident}` in the root")
}
}
_ => {
if !ident.is_path_segment_keyword() {
- format!("no external crate `{}`", ident)
+ format!("no external crate `{ident}`")
} else {
// HACK(eddyb) this shows up for `self` & `super`, which
// should work instead - for now keep the same error message.
- format!("no `{}` in the root", ident)
+ format!("no `{ident}` in the root")
}
}
};
@@ -1146,10 +1219,9 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let (ns, binding) = reexport_error.unwrap();
if pub_use_of_private_extern_crate_hack(import, binding) {
let msg = format!(
- "extern crate `{}` is private, and cannot be \
+ "extern crate `{ident}` is private, and cannot be \
re-exported (error E0365), consider declaring with \
- `pub`",
- ident
+ `pub`"
);
self.lint_buffer.buffer_lint(
PUB_USE_OF_PRIVATE_EXTERN_CRATE,
@@ -1289,7 +1361,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
UNUSED_IMPORTS,
id,
import.span,
- format!("the item `{}` is imported redundantly", ident),
+ format!("the item `{ident}` is imported redundantly"),
BuiltinLintDiagnostics::RedundantImport(redundant_spans, ident),
);
}
@@ -1300,9 +1372,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let ImportKind::Glob { id, is_prelude, .. } = import.kind else { unreachable!() };
let ModuleOrUniformRoot::Module(module) = import.imported_module.get().unwrap() else {
- self.tcx.sess.create_err(CannotGlobImportAllCrates {
- span: import.span,
- }).emit();
+ self.tcx.sess.create_err(CannotGlobImportAllCrates { span: import.span }).emit();
return;
};
@@ -1337,7 +1407,17 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
};
if self.is_accessible_from(binding.vis, scope) {
let imported_binding = self.import(binding, import);
- let _ = self.try_define(import.parent_scope.module, key, imported_binding);
+ let warn_ambiguity = self
+ .resolution(import.parent_scope.module, key)
+ .borrow()
+ .binding()
+ .is_some_and(|binding| binding.is_warn_ambiguity());
+ let _ = self.try_define(
+ import.parent_scope.module,
+ key,
+ imported_binding,
+ warn_ambiguity,
+ );
}
}
@@ -1356,7 +1436,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
module.for_each_child(self, |this, ident, _, binding| {
let res = binding.res().expect_non_local();
- if res != def::Res::Err && !binding.is_ambiguity() {
+ let error_ambiguity = binding.is_ambiguity() && !binding.warn_ambiguity;
+ if res != def::Res::Err && !error_ambiguity {
let mut reexport_chain = SmallVec::new();
let mut next_binding = binding;
while let NameBindingKind::Import { binding, import, .. } = next_binding.kind {
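The `warn_ambiguity` plumbing above downgrades certain glob-vs-glob ambiguities to warnings recorded for the `ambiguous_glob_reexports` lint instead of hard errors. A minimal sketch of the shape of code this concerns (module and type names are illustrative, not taken from the patch):

    mod a { pub struct Item; }
    mod b { pub struct Item; }

    // Both globs re-export a binding named `Item`; the name is ambiguous
    // through this module, which the lint can now report rather than reject.
    pub use a::*;
    pub use b::*;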
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index 90cb312ed..c87db96a5 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -337,6 +337,7 @@ enum LifetimeBinderKind {
PolyTrait,
WhereBound,
Item,
+ ConstItem,
Function,
Closure,
ImplBlock,
@@ -349,7 +350,7 @@ impl LifetimeBinderKind {
BareFnType => "type",
PolyTrait => "bound",
WhereBound => "bound",
- Item => "item",
+ Item | ConstItem => "item",
ImplBlock => "impl block",
Function => "function",
Closure => "closure",
@@ -469,7 +470,7 @@ impl<'a> PathSource<'a> {
| DefKind::Enum
| DefKind::Trait
| DefKind::TraitAlias
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::AssocTy
| DefKind::TyParam
| DefKind::OpaqueTy
@@ -508,7 +509,7 @@ impl<'a> PathSource<'a> {
DefKind::Struct
| DefKind::Union
| DefKind::Variant
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::AssocTy,
_,
) | Res::SelfTyParam { .. }
@@ -549,6 +550,7 @@ enum MaybeExported<'a> {
Ok(NodeId),
Impl(Option<DefId>),
ImplItem(Result<DefId, &'a Visibility>),
+ NestedUse(&'a Visibility),
}
impl MaybeExported<'_> {
@@ -559,7 +561,9 @@ impl MaybeExported<'_> {
trait_def_id.as_local()
}
MaybeExported::Impl(None) => return true,
- MaybeExported::ImplItem(Err(vis)) => return vis.kind.is_pub(),
+ MaybeExported::ImplItem(Err(vis)) | MaybeExported::NestedUse(vis) => {
+ return vis.kind.is_pub();
+ }
};
def_id.map_or(true, |def_id| r.effective_visibilities.is_exported(def_id))
}
@@ -900,9 +904,12 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast,
sig.decl.inputs.iter().map(|Param { ty, .. }| (None, &**ty)),
&sig.decl.output,
);
+
+ if let Some((async_node_id, span)) = sig.header.asyncness.opt_return_id() {
+ this.record_lifetime_params_for_impl_trait(async_node_id, span);
+ }
},
);
- self.record_lifetime_params_for_async(fn_id, sig.header.asyncness.opt_return_id());
return;
}
FnKind::Fn(..) => {
@@ -938,12 +945,14 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast,
.iter()
.map(|Param { pat, ty, .. }| (Some(&**pat), &**ty)),
&declaration.output,
- )
+ );
+
+ if let Some((async_node_id, span)) = async_node_id {
+ this.record_lifetime_params_for_impl_trait(async_node_id, span);
+ }
},
);
- this.record_lifetime_params_for_async(fn_id, async_node_id);
-
if let Some(body) = body {
// Ignore errors in function bodies if this is rustdoc
// Be sure not to set this until the function signature has been resolved.
@@ -1361,7 +1370,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
fn visit_generic_params(&mut self, params: &'ast [GenericParam], add_self_upper: bool) {
// For type parameter defaults, we have to ban access
- // to following type parameters, as the InternalSubsts can only
+ // to following type parameters, as the GenericArgs can only
// provide previous type parameters as they're built. We
// put all the parameters on the ban list and then remove
// them one by one as they are processed and become available.
@@ -1690,6 +1699,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
// Leave the responsibility to create the `LocalDefId` to lowering.
let param = self.r.next_node_id();
let res = LifetimeRes::Fresh { param, binder };
+ self.record_lifetime_param(param, res);
// Record the created lifetime parameter so lowering can pick it up and add it to HIR.
self.r
@@ -1730,7 +1740,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
Res::Def(DefKind::Struct, def_id)
| Res::Def(DefKind::Union, def_id)
| Res::Def(DefKind::Enum, def_id)
- | Res::Def(DefKind::TyAlias, def_id)
+ | Res::Def(DefKind::TyAlias { .. }, def_id)
| Res::Def(DefKind::Trait, def_id)
if i + 1 == proj_start =>
{
@@ -1913,10 +1923,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
candidate: LifetimeElisionCandidate,
) {
if let Some(prev_res) = self.r.lifetimes_res_map.insert(id, res) {
- panic!(
- "lifetime {:?} resolved multiple times ({:?} before, {:?} now)",
- id, prev_res, res
- )
+ panic!("lifetime {id:?} resolved multiple times ({prev_res:?} before, {res:?} now)")
}
match res {
LifetimeRes::Param { .. } | LifetimeRes::Fresh { .. } | LifetimeRes::Static => {
@@ -1932,8 +1939,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
fn record_lifetime_param(&mut self, id: NodeId, res: LifetimeRes) {
if let Some(prev_res) = self.r.lifetimes_res_map.insert(id, res) {
panic!(
- "lifetime parameter {:?} resolved multiple times ({:?} before, {:?} now)",
- id, prev_res, res
+ "lifetime parameter {id:?} resolved multiple times ({prev_res:?} before, {res:?} now)"
)
}
}
@@ -2284,7 +2290,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
fn resolve_item(&mut self, item: &'ast Item) {
let mod_inner_docs =
matches!(item.kind, ItemKind::Mod(..)) && rustdoc::inner_docs(&item.attrs);
- if !mod_inner_docs && !matches!(item.kind, ItemKind::Impl(..)) {
+ if !mod_inner_docs && !matches!(item.kind, ItemKind::Impl(..) | ItemKind::Use(..)) {
self.resolve_doc_links(&item.attrs, MaybeExported::Ok(item.id));
}
@@ -2401,33 +2407,53 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
});
}
- ItemKind::Static(box ast::StaticItem { ref ty, ref expr, .. })
- | ItemKind::Const(box ast::ConstItem { ref ty, ref expr, .. }) => {
+ ItemKind::Static(box ast::StaticItem { ref ty, ref expr, .. }) => {
self.with_static_rib(|this| {
this.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Static), |this| {
this.visit_ty(ty);
});
- this.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Infer), |this| {
+ if let Some(expr) = expr {
+ // We already forbid generic params because of the above item rib,
+ // so it doesn't matter whether this is a trivial constant.
+ this.resolve_const_body(expr, Some((item.ident, ConstantItemKind::Static)));
+ }
+ });
+ }
+
+ ItemKind::Const(box ast::ConstItem { ref generics, ref ty, ref expr, .. }) => {
+ self.with_generic_param_rib(
+ &generics.params,
+ RibKind::Item(HasGenericParams::Yes(generics.span)),
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ kind: LifetimeBinderKind::ConstItem,
+ span: generics.span,
+ },
+ |this| {
+ this.visit_generics(generics);
+
+ this.with_lifetime_rib(
+ LifetimeRibKind::Elided(LifetimeRes::Static),
+ |this| this.visit_ty(ty),
+ );
+
if let Some(expr) = expr {
- let constant_item_kind = match item.kind {
- ItemKind::Const(..) => ConstantItemKind::Const,
- ItemKind::Static(..) => ConstantItemKind::Static,
- _ => unreachable!(),
- };
- // We already forbid generic params because of the above item rib,
- // so it doesn't matter whether this is a trivial constant.
- this.with_constant_rib(
- IsRepeatExpr::No,
- ConstantHasGenerics::Yes,
- Some((item.ident, constant_item_kind)),
- |this| this.visit_expr(expr),
+ this.resolve_const_body(
+ expr,
+ Some((item.ident, ConstantItemKind::Const)),
);
}
- });
- });
+ },
+ );
}
ItemKind::Use(ref use_tree) => {
+ let maybe_exported = match use_tree.kind {
+ UseTreeKind::Simple(_) | UseTreeKind::Glob => MaybeExported::Ok(item.id),
+ UseTreeKind::Nested(_) => MaybeExported::NestedUse(&item.vis),
+ };
+ self.resolve_doc_links(&item.attrs, maybe_exported);
+
self.future_proof_import(use_tree);
}
@@ -2460,8 +2486,11 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
F: FnOnce(&mut Self),
{
debug!("with_generic_param_rib");
- let LifetimeRibKind::Generics { binder, span: generics_span, kind: generics_kind, .. }
- = lifetime_kind else { panic!() };
+ let LifetimeRibKind::Generics { binder, span: generics_span, kind: generics_kind, .. } =
+ lifetime_kind
+ else {
+ panic!()
+ };
let mut function_type_rib = Rib::new(kind);
let mut function_value_rib = Rib::new(kind);
@@ -2566,7 +2595,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
let res = match kind {
RibKind::Item(..) | RibKind::AssocItem => Res::Def(def_kind, def_id.to_def_id()),
RibKind::Normal => {
- if self.r.tcx.sess.features_untracked().non_lifetime_binders {
+ if self.r.tcx.features().non_lifetime_binders {
Res::Def(def_kind, def_id.to_def_id())
} else {
Res::Err
@@ -2688,28 +2717,31 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
for item in trait_items {
self.resolve_doc_links(&item.attrs, MaybeExported::Ok(item.id));
match &item.kind {
- AssocItemKind::Const(box ast::ConstItem { ty, expr, .. }) => {
- self.visit_ty(ty);
- // Only impose the restrictions of `ConstRibKind` for an
- // actual constant expression in a provided default.
- if let Some(expr) = expr {
- // We allow arbitrary const expressions inside of associated consts,
- // even if they are potentially not const evaluatable.
- //
- // Type parameters can already be used and as associated consts are
- // not used as part of the type system, this is far less surprising.
- self.with_lifetime_rib(
- LifetimeRibKind::Elided(LifetimeRes::Infer),
- |this| {
- this.with_constant_rib(
- IsRepeatExpr::No,
- ConstantHasGenerics::Yes,
- None,
- |this| this.visit_expr(expr),
- )
- },
- );
- }
+ AssocItemKind::Const(box ast::ConstItem { generics, ty, expr, .. }) => {
+ self.with_generic_param_rib(
+ &generics.params,
+ RibKind::AssocItem,
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ span: generics.span,
+ kind: LifetimeBinderKind::ConstItem,
+ },
+ |this| {
+ this.visit_generics(generics);
+ this.visit_ty(ty);
+
+ // Only impose the restrictions of `ConstRibKind` for an
+ // actual constant expression in a provided default.
+ if let Some(expr) = expr {
+ // We allow arbitrary const expressions inside of associated consts,
+ // even if they are potentially not const evaluatable.
+ //
+ // Type parameters can already be used and as associated consts are
+ // not used as part of the type system, this is far less surprising.
+ this.resolve_const_body(expr, None);
+ }
+ },
+ );
}
AssocItemKind::Fn(box Fn { generics, .. }) => {
walk_assoc_item(self, generics, LifetimeBinderKind::Function, item);
@@ -2864,36 +2896,42 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
use crate::ResolutionError::*;
self.resolve_doc_links(&item.attrs, MaybeExported::ImplItem(trait_id.ok_or(&item.vis)));
match &item.kind {
- AssocItemKind::Const(box ast::ConstItem { ty, expr, .. }) => {
+ AssocItemKind::Const(box ast::ConstItem { generics, ty, expr, .. }) => {
debug!("resolve_implementation AssocItemKind::Const");
- // If this is a trait impl, ensure the const
- // exists in trait
- self.check_trait_item(
- item.id,
- item.ident,
- &item.kind,
- ValueNS,
- item.span,
- seen_trait_items,
- |i, s, c| ConstNotMemberOfTrait(i, s, c),
- );
- self.visit_ty(ty);
- if let Some(expr) = expr {
- // We allow arbitrary const expressions inside of associated consts,
- // even if they are potentially not const evaluatable.
- //
- // Type parameters can already be used and as associated consts are
- // not used as part of the type system, this is far less surprising.
- self.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Infer), |this| {
- this.with_constant_rib(
- IsRepeatExpr::No,
- ConstantHasGenerics::Yes,
- None,
- |this| this.visit_expr(expr),
- )
- });
- }
+ self.with_generic_param_rib(
+ &generics.params,
+ RibKind::AssocItem,
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ span: generics.span,
+ kind: LifetimeBinderKind::ConstItem,
+ },
+ |this| {
+ // If this is a trait impl, ensure the const
+ // exists in trait
+ this.check_trait_item(
+ item.id,
+ item.ident,
+ &item.kind,
+ ValueNS,
+ item.span,
+ seen_trait_items,
+ |i, s, c| ConstNotMemberOfTrait(i, s, c),
+ );
+
+ this.visit_generics(generics);
+ this.visit_ty(ty);
+ if let Some(expr) = expr {
+ // We allow arbitrary const expressions inside of associated consts,
+ // even if they are potentially not const evaluatable.
+ //
+ // Type parameters can already be used and as associated consts are
+ // not used as part of the type system, this is far less surprising.
+ this.resolve_const_body(expr, None);
+ }
+ },
+ );
}
AssocItemKind::Fn(box Fn { generics, .. }) => {
debug!("resolve_implementation AssocItemKind::Fn");
@@ -2972,7 +3010,9 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
F: FnOnce(Ident, String, Option<Symbol>) -> ResolutionError<'a>,
{
// If there is a TraitRef in scope for an impl, then the method must be in the trait.
- let Some((module, _)) = self.current_trait_ref else { return; };
+ let Some((module, _)) = self.current_trait_ref else {
+ return;
+ };
ident.span.normalize_to_macros_2_0_and_adjust(module.expansion);
let key = BindingKey::new(ident, ns);
let mut binding = self.r.resolution(module, key).try_borrow().ok().and_then(|r| r.binding);
@@ -3049,6 +3089,14 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
);
}
+ fn resolve_const_body(&mut self, expr: &'ast Expr, item: Option<(Ident, ConstantItemKind)>) {
+ self.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Infer), |this| {
+ this.with_constant_rib(IsRepeatExpr::No, ConstantHasGenerics::Yes, item, |this| {
+ this.visit_expr(expr)
+ });
+ })
+ }
+
fn resolve_params(&mut self, params: &'ast [Param]) {
let mut bindings = smallvec![(PatBoundCtx::Product, Default::default())];
self.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Infer), |this| {
@@ -3503,7 +3551,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
let report_errors = |this: &mut Self, res: Option<Res>| {
if this.should_report_errs() {
let (err, candidates) =
- this.smart_resolve_report_errors(path, path, path_span, source, res);
+ this.smart_resolve_report_errors(path, None, path_span, source, res);
let def_id = this.parent_scope.module.nearest_parent_mod();
let instead = res.is_some();
@@ -3555,14 +3603,14 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
// Before we start looking for candidates, we have to get our hands
// on the type user is trying to perform invocation on; basically:
// we're transforming `HashMap::new` into just `HashMap`.
- let prefix_path = match path.split_last() {
- Some((_, path)) if !path.is_empty() => path,
+ let (following_seg, prefix_path) = match path.split_last() {
+ Some((last, path)) if !path.is_empty() => (Some(last), path),
_ => return Some(parent_err),
};
let (mut err, candidates) = this.smart_resolve_report_errors(
prefix_path,
- path,
+ following_seg,
path_span,
PathSource::Type,
None,
@@ -3902,12 +3950,14 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
if path.len() > 1
&& let Some(res) = result.full_res()
+ && let Some((&last_segment, prev_segs)) = path.split_last()
+ && prev_segs.iter().all(|seg| !seg.has_generic_args)
&& res != Res::Err
&& path[0].ident.name != kw::PathRoot
&& path[0].ident.name != kw::DollarCrate
{
let unqualified_result = {
- match self.resolve_path(&[*path.last().unwrap()], Some(ns), None) {
+ match self.resolve_path(&[last_segment], Some(ns), None) {
PathResult::NonModule(path_res) => path_res.expect_full_res(),
PathResult::Module(ModuleOrUniformRoot::Module(module)) => {
module.res().unwrap()
@@ -3917,11 +3967,14 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
};
if res == unqualified_result {
let lint = lint::builtin::UNUSED_QUALIFICATIONS;
- self.r.lint_buffer.buffer_lint(
+ self.r.lint_buffer.buffer_lint_with_diagnostic(
lint,
finalize.node_id,
finalize.path_span,
"unnecessary qualification",
+ lint::BuiltinLintDiagnostics::UnusedQualifications {
+ removal_span: finalize.path_span.until(last_segment.ident.span),
+ }
)
}
}
@@ -4223,7 +4276,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
ExprKind::ConstBlock(ref ct) => {
self.resolve_anon_const(ct, AnonConstKind::InlineConst);
}
- ExprKind::Index(ref elem, ref idx) => {
+ ExprKind::Index(ref elem, ref idx, _) => {
self.resolve_expr(elem, Some(expr));
self.visit_expr(idx);
}
@@ -4279,39 +4332,32 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
)
}
- /// Construct the list of in-scope lifetime parameters for async lowering.
+ /// Construct the list of in-scope lifetime parameters for impl trait lowering.
/// We include all lifetime parameters, either named or "Fresh".
/// The order of those parameters does not matter, as long as it is
/// deterministic.
- fn record_lifetime_params_for_async(
- &mut self,
- fn_id: NodeId,
- async_node_id: Option<(NodeId, Span)>,
- ) {
- if let Some((async_node_id, span)) = async_node_id {
- let mut extra_lifetime_params =
- self.r.extra_lifetime_params_map.get(&fn_id).cloned().unwrap_or_default();
- for rib in self.lifetime_ribs.iter().rev() {
- extra_lifetime_params.extend(
- rib.bindings.iter().map(|(&ident, &(node_id, res))| (ident, node_id, res)),
- );
- match rib.kind {
- LifetimeRibKind::Item => break,
- LifetimeRibKind::AnonymousCreateParameter { binder, .. } => {
- if let Some(earlier_fresh) = self.r.extra_lifetime_params_map.get(&binder) {
- extra_lifetime_params.extend(earlier_fresh);
- }
- }
- LifetimeRibKind::Generics { .. } => {}
- _ => {
- // We are in a function definition. We should only find `Generics`
- // and `AnonymousCreateParameter` inside the innermost `Item`.
- span_bug!(span, "unexpected rib kind: {:?}", rib.kind)
+ fn record_lifetime_params_for_impl_trait(&mut self, impl_trait_node_id: NodeId, span: Span) {
+ let mut extra_lifetime_params = vec![];
+
+ for rib in self.lifetime_ribs.iter().rev() {
+ extra_lifetime_params
+ .extend(rib.bindings.iter().map(|(&ident, &(node_id, res))| (ident, node_id, res)));
+ match rib.kind {
+ LifetimeRibKind::Item => break,
+ LifetimeRibKind::AnonymousCreateParameter { binder, .. } => {
+ if let Some(earlier_fresh) = self.r.extra_lifetime_params_map.get(&binder) {
+ extra_lifetime_params.extend(earlier_fresh);
}
}
+ LifetimeRibKind::Generics { .. } => {}
+ _ => {
+ // We are in a function definition. We should only find `Generics`
+ // and `AnonymousCreateParameter` inside the innermost `Item`.
+ span_bug!(span, "unexpected rib kind: {:?}", rib.kind)
+ }
}
- self.r.extra_lifetime_params_map.insert(async_node_id, extra_lifetime_params);
}
+ self.r.extra_lifetime_params_map.insert(impl_trait_node_id, extra_lifetime_params);
}
fn resolve_and_cache_rustdoc_path(&mut self, path_str: &str, ns: Namespace) -> Option<Res> {
@@ -4328,7 +4374,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
if let Some(res) = res
&& let Some(def_id) = res.opt_def_id()
&& !def_id.is_local()
- && self.r.tcx.sess.crate_types().contains(&CrateType::ProcMacro)
+ && self.r.tcx.crate_types().contains(&CrateType::ProcMacro)
&& matches!(self.r.tcx.sess.opts.resolve_doc_links, ResolveDocLinks::ExportedMetadata) {
// Encoding foreign def ids in proc macro crate metadata will ICE.
return None;
@@ -4343,7 +4389,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
match self.r.tcx.sess.opts.resolve_doc_links {
ResolveDocLinks::None => return,
ResolveDocLinks::ExportedMetadata
- if !self.r.tcx.sess.crate_types().iter().copied().any(CrateType::has_metadata)
+ if !self.r.tcx.crate_types().iter().copied().any(CrateType::has_metadata)
|| !maybe_exported.eval(self.r) =>
{
return;
@@ -4402,7 +4448,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
.into_iter()
.filter_map(|tr| {
if !tr.def_id.is_local()
- && self.r.tcx.sess.crate_types().contains(&CrateType::ProcMacro)
+ && self.r.tcx.crate_types().contains(&CrateType::ProcMacro)
&& matches!(
self.r.tcx.sess.opts.resolve_doc_links,
ResolveDocLinks::ExportedMetadata
@@ -4430,6 +4476,7 @@ impl<'ast> Visitor<'ast> for LifetimeCountVisitor<'_, '_, '_> {
fn visit_item(&mut self, item: &'ast Item) {
match &item.kind {
ItemKind::TyAlias(box TyAlias { ref generics, .. })
+ | ItemKind::Const(box ConstItem { ref generics, .. })
| ItemKind::Fn(box Fn { ref generics, .. })
| ItemKind::Enum(_, ref generics)
| ItemKind::Struct(_, ref generics)
@@ -4449,7 +4496,6 @@ impl<'ast> Visitor<'ast> for LifetimeCountVisitor<'_, '_, '_> {
ItemKind::Mod(..)
| ItemKind::ForeignMod(..)
| ItemKind::Static(..)
- | ItemKind::Const(..)
| ItemKind::Use(..)
| ItemKind::ExternCrate(..)
| ItemKind::MacroDef(..)
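The `late.rs` changes above introduce `LifetimeBinderKind::ConstItem` and resolve generic parameters on `const` items, in support of the unstable `generic_const_items` feature checked in the diagnostics below. A rough sketch of the kind of item this makes resolvable, assuming a nightly compiler with that feature enabled (the item itself is illustrative):

    #![feature(generic_const_items)]

    // A free const item with its own generic parameter, now resolved through
    // the same generic-param rib machinery as other generic items.
    const NONE<T>: Option<T> = None;

    fn main() {
        let x = NONE::<u32>;
        assert!(x.is_none());
    }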
diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs
index c0e3f1aaf..c34b7df9b 100644
--- a/compiler/rustc_resolve/src/late/diagnostics.rs
+++ b/compiler/rustc_resolve/src/late/diagnostics.rs
@@ -332,15 +332,11 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
pub(crate) fn smart_resolve_partial_mod_path_errors(
&mut self,
prefix_path: &[Segment],
- path: &[Segment],
+ following_seg: Option<&Segment>,
) -> Vec<ImportSuggestion> {
- let next_seg = if path.len() >= prefix_path.len() + 1 && prefix_path.len() == 1 {
- path.get(prefix_path.len())
- } else {
- None
- };
if let Some(segment) = prefix_path.last() &&
- let Some(next_seg) = next_seg {
+ let Some(following_seg) = following_seg
+ {
let candidates = self.r.lookup_import_candidates(
segment.ident,
Namespace::TypeNS,
@@ -353,9 +349,10 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
.filter(|candidate| {
if let Some(def_id) = candidate.did &&
let Some(module) = self.r.get_module(def_id) {
- self.r.resolutions(module).borrow().iter().any(|(key, _r)| {
- key.ident.name == next_seg.ident.name
- })
+ Some(def_id) != self.parent_scope.module.opt_def_id() &&
+ self.r.resolutions(module).borrow().iter().any(|(key, _r)| {
+ key.ident.name == following_seg.ident.name
+ })
} else {
false
}
@@ -371,7 +368,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
pub(crate) fn smart_resolve_report_errors(
&mut self,
path: &[Segment],
- full_path: &[Segment],
+ following_seg: Option<&Segment>,
span: Span,
source: PathSource<'_>,
res: Option<Res>,
@@ -412,8 +409,15 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
return (err, Vec::new());
}
- let (found, candidates) =
- self.try_lookup_name_relaxed(&mut err, source, path, full_path, span, res, &base_error);
+ let (found, candidates) = self.try_lookup_name_relaxed(
+ &mut err,
+ source,
+ path,
+ following_seg,
+ span,
+ res,
+ &base_error,
+ );
if found {
return (err, candidates);
}
@@ -422,7 +426,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
// if we have suggested using pattern matching, then don't add needless suggestions
// for typos.
- fallback |= self.suggest_typo(&mut err, source, path, span, &base_error);
+ fallback |= self.suggest_typo(&mut err, source, path, following_seg, span, &base_error);
if fallback {
// Fallback label.
@@ -442,20 +446,29 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
err: &mut Diagnostic,
base_error: &BaseError,
) {
- let Some(ty) = self.diagnostic_metadata.current_type_path else { return; };
- let TyKind::Path(_, path) = &ty.kind else { return; };
+ let Some(ty) = self.diagnostic_metadata.current_type_path else {
+ return;
+ };
+ let TyKind::Path(_, path) = &ty.kind else {
+ return;
+ };
for segment in &path.segments {
- let Some(params) = &segment.args else { continue; };
- let ast::GenericArgs::AngleBracketed(ref params) = params.deref() else { continue; };
+ let Some(params) = &segment.args else {
+ continue;
+ };
+ let ast::GenericArgs::AngleBracketed(ref params) = params.deref() else {
+ continue;
+ };
for param in &params.args {
- let ast::AngleBracketedArg::Constraint(constraint) = param else { continue; };
+ let ast::AngleBracketedArg::Constraint(constraint) = param else {
+ continue;
+ };
let ast::AssocConstraintKind::Bound { bounds } = &constraint.kind else {
continue;
};
for bound in bounds {
- let ast::GenericBound::Trait(trait_ref, ast::TraitBoundModifier::None)
- = bound else
- {
+ let ast::GenericBound::Trait(trait_ref, ast::TraitBoundModifier::None) = bound
+ else {
continue;
};
if base_error.span == trait_ref.span {
@@ -519,7 +532,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
err: &mut Diagnostic,
source: PathSource<'_>,
path: &[Segment],
- full_path: &[Segment],
+ following_seg: Option<&Segment>,
span: Span,
res: Option<Res>,
base_error: &BaseError,
@@ -542,7 +555,6 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
}
})
.collect::<Vec<_>>();
- let crate_def_id = CRATE_DEF_ID.to_def_id();
// Try to filter out intrinsics candidates, as long as we have
// some other candidates to suggest.
let intrinsic_candidates: Vec<_> = candidates
@@ -553,8 +565,9 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
.collect();
if candidates.is_empty() {
// Put them back if we have no more candidates to suggest...
- candidates.extend(intrinsic_candidates);
+ candidates = intrinsic_candidates;
}
+ let crate_def_id = CRATE_DEF_ID.to_def_id();
if candidates.is_empty() && is_expected(Res::Def(DefKind::Enum, crate_def_id)) {
let mut enum_candidates: Vec<_> = self
.r
@@ -572,13 +585,13 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
let others = match enum_candidates.len() {
1 => String::new(),
2 => " and 1 other".to_owned(),
- n => format!(" and {} others", n),
+ n => format!(" and {n} others"),
};
format!("there is an enum variant `{}`{}; ", enum_candidates[0].0, others)
} else {
String::new()
};
- let msg = format!("{}try using the variant's enum", preamble);
+ let msg = format!("{preamble}try using the variant's enum");
err.span_suggestions(
span,
@@ -590,8 +603,9 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
}
// Try finding a suitable replacement.
- let typo_sugg =
- self.lookup_typo_candidate(path, source.namespace(), is_expected).to_opt_suggestion();
+ let typo_sugg = self
+ .lookup_typo_candidate(path, following_seg, source.namespace(), is_expected)
+ .to_opt_suggestion();
if path.len() == 1 && self.self_type_is_available() {
if let Some(candidate) =
self.lookup_assoc_candidate(ident, ns, is_expected, source.is_call())
@@ -682,7 +696,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
ident.name == path[0].ident.name {
err.span_help(
ident.span,
- format!("the binding `{}` is available in a different scope in the same function", path_str),
+ format!("the binding `{path_str}` is available in a different scope in the same function"),
);
return (true, candidates);
}
@@ -690,7 +704,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
}
if candidates.is_empty() {
- candidates = self.smart_resolve_partial_mod_path_errors(path, full_path);
+ candidates = self.smart_resolve_partial_mod_path_errors(path, following_seg);
}
return (false, candidates);
@@ -776,12 +790,14 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
err: &mut Diagnostic,
source: PathSource<'_>,
path: &[Segment],
+ following_seg: Option<&Segment>,
span: Span,
base_error: &BaseError,
) -> bool {
let is_expected = &|res| source.is_expected(res);
let ident_span = path.last().map_or(span, |ident| ident.ident.span);
- let typo_sugg = self.lookup_typo_candidate(path, source.namespace(), is_expected);
+ let typo_sugg =
+ self.lookup_typo_candidate(path, following_seg, source.namespace(), is_expected);
let is_in_same_file = &|sp1, sp2| {
let source_map = self.r.tcx.sess.source_map();
let file1 = source_map.span_to_filename(sp1);
@@ -842,7 +858,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
for label_rib in &self.label_ribs {
for (label_ident, node_id) in &label_rib.bindings {
let ident = path.last().unwrap().ident;
- if format!("'{}", ident) == label_ident.to_string() {
+ if format!("'{ident}") == label_ident.to_string() {
err.span_label(label_ident.span, "a label with a similar name exists");
if let PathSource::Expr(Some(Expr {
kind: ExprKind::Break(None, Some(_)),
@@ -967,7 +983,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
if let Some(ident) = fn_kind.ident() {
err.span_label(
ident.span,
- format!("this function {} have a `self` parameter", doesnt),
+ format!("this function {doesnt} have a `self` parameter"),
);
}
}
@@ -1141,10 +1157,14 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
&poly_trait_ref.trait_ref.path.segments[..]
{
if ident.span == span {
- let Some(new_where_bound_predicate) = mk_where_bound_predicate(path, poly_trait_ref, ty) else { return false; };
+ let Some(new_where_bound_predicate) =
+ mk_where_bound_predicate(path, poly_trait_ref, ty)
+ else {
+ return false;
+ };
err.span_suggestion_verbose(
*where_span,
- format!("constrain the associated type to `{}`", ident),
+ format!("constrain the associated type to `{ident}`"),
where_bound_predicate_to_string(&new_where_bound_predicate),
Applicability::MaybeIncorrect,
);
@@ -1160,37 +1180,34 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
/// return the span of the whole call and the span for all arguments except the first one (`self`).
fn call_has_self_arg(&self, source: PathSource<'_>) -> Option<(Span, Option<Span>)> {
let mut has_self_arg = None;
- if let PathSource::Expr(Some(parent)) = source {
- match &parent.kind {
- ExprKind::Call(_, args) if !args.is_empty() => {
- let mut expr_kind = &args[0].kind;
- loop {
- match expr_kind {
- ExprKind::Path(_, arg_name) if arg_name.segments.len() == 1 => {
- if arg_name.segments[0].ident.name == kw::SelfLower {
- let call_span = parent.span;
- let tail_args_span = if args.len() > 1 {
- Some(Span::new(
- args[1].span.lo(),
- args.last().unwrap().span.hi(),
- call_span.ctxt(),
- None,
- ))
- } else {
- None
- };
- has_self_arg = Some((call_span, tail_args_span));
- }
- break;
+ if let PathSource::Expr(Some(parent)) = source
+ && let ExprKind::Call(_, args) = &parent.kind
+ && !args.is_empty() {
+ let mut expr_kind = &args[0].kind;
+ loop {
+ match expr_kind {
+ ExprKind::Path(_, arg_name) if arg_name.segments.len() == 1 => {
+ if arg_name.segments[0].ident.name == kw::SelfLower {
+ let call_span = parent.span;
+ let tail_args_span = if args.len() > 1 {
+ Some(Span::new(
+ args[1].span.lo(),
+ args.last().unwrap().span.hi(),
+ call_span.ctxt(),
+ None,
+ ))
+ } else {
+ None
+ };
+ has_self_arg = Some((call_span, tail_args_span));
}
- ExprKind::AddrOf(_, _, expr) => expr_kind = &expr.kind,
- _ => break,
+ break;
}
+ ExprKind::AddrOf(_, _, expr) => expr_kind = &expr.kind,
+ _ => break,
}
}
- _ => (),
- }
- };
+ }
has_self_arg
}
@@ -1200,15 +1217,15 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
// where a brace being opened means a block is being started. Look
// ahead for the next text to see if `span` is followed by a `{`.
let sm = self.r.tcx.sess.source_map();
- let sp = sm.span_look_ahead(span, None, Some(50));
- let followed_by_brace = matches!(sm.span_to_snippet(sp), Ok(ref snippet) if snippet == "{");
- // In case this could be a struct literal that needs to be surrounded
- // by parentheses, find the appropriate span.
- let closing_span = sm.span_look_ahead(span, Some("}"), Some(50));
- let closing_brace: Option<Span> = sm
- .span_to_snippet(closing_span)
- .map_or(None, |s| if s == "}" { Some(span.to(closing_span)) } else { None });
- (followed_by_brace, closing_brace)
+ if let Some(followed_brace_span) = sm.span_look_ahead(span, "{", Some(50)) {
+ // In case this could be a struct literal that needs to be surrounded
+ // by parentheses, find the appropriate span.
+ let close_brace_span = sm.span_look_ahead(followed_brace_span, "}", Some(50));
+ let closing_brace = close_brace_span.map(|sp| span.to(sp));
+ (true, closing_brace)
+ } else {
+ (false, None)
+ }
}
/// Provides context-dependent help for errors reported by the `smart_resolve_path_fragment`
@@ -1318,8 +1335,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
span, // Note the parentheses surrounding the suggestion below
format!(
"you might want to surround a struct literal with parentheses: \
- `({} {{ /* fields */ }})`?",
- path_str
+ `({path_str} {{ /* fields */ }})`?"
),
);
}
@@ -1353,7 +1369,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
.map(|(idx, new)| (new, old_fields.get(idx)))
.map(|(new, old)| {
let new = new.to_ident_string();
- if let Some(Some(old)) = old && new != *old { format!("{}: {}", new, old) } else { new }
+ if let Some(Some(old)) = old && new != *old { format!("{new}: {old}") } else { new }
})
.collect::<Vec<String>>()
} else {
@@ -1370,7 +1386,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
};
err.span_suggestion(
span,
- format!("use struct {} syntax instead", descr),
+ format!("use struct {descr} syntax instead"),
format!("{path_str} {{{pad}{fields}{pad}}}"),
applicability,
);
@@ -1403,7 +1419,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
(Res::Def(DefKind::Macro(MacroKind::Bang), _), _) => {
err.span_label(span, fallback_label.to_string());
}
- (Res::Def(DefKind::TyAlias, def_id), PathSource::Trait(_)) => {
+ (Res::Def(DefKind::TyAlias { .. }, def_id), PathSource::Trait(_)) => {
err.span_label(span, "type aliases cannot be used as traits");
if self.r.tcx.sess.is_nightly_build() {
let msg = "you might have meant to use `#![feature(trait_alias)]` instead of a \
@@ -1564,7 +1580,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
err.span_suggestion(
span,
"use the tuple variant pattern syntax instead",
- format!("{}({})", path_str, fields),
+ format!("{path_str}({fields})"),
Applicability::HasPlaceholders,
);
}
@@ -1572,7 +1588,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
err.span_label(span, fallback_label.to_string());
err.note("can't use `Self` as a constructor, you must use the implemented struct");
}
- (Res::Def(DefKind::TyAlias | DefKind::AssocTy, _), _) if ns == ValueNS => {
+ (Res::Def(DefKind::TyAlias { .. } | DefKind::AssocTy, _), _) if ns == ValueNS => {
err.note("can't use a type alias as a constructor");
}
_ => return false,
@@ -1715,6 +1731,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
fn lookup_typo_candidate(
&mut self,
path: &[Segment],
+ following_seg: Option<&Segment>,
ns: Namespace,
filter_fn: &impl Fn(Res) -> bool,
) -> TypoCandidate {
@@ -1793,6 +1810,26 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
}
}
+ // If `following_seg` is present, filter out every suggestion that does not continue the path.
+ if let Some(following_seg) = following_seg {
+ names.retain(|suggestion| match suggestion.res {
+ Res::Def(DefKind::Struct | DefKind::Enum | DefKind::Union, _) => {
+ // FIXME: this is not totally accurate, but mostly works
+ suggestion.candidate != following_seg.ident.name
+ }
+ Res::Def(DefKind::Mod, def_id) => self.r.get_module(def_id).map_or_else(
+ || false,
+ |module| {
+ self.r
+ .resolutions(module)
+ .borrow()
+ .iter()
+ .any(|(key, _)| key.ident.name == following_seg.ident.name)
+ },
+ ),
+ _ => true,
+ });
+ }
let name = path[path.len() - 1].ident.name;
// Make sure error reporting is deterministic.
names.sort_by(|a, b| a.candidate.as_str().cmp(b.candidate.as_str()));
@@ -1803,7 +1840,8 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
None,
) {
Some(found) => {
- let Some(sugg) = names.into_iter().find(|suggestion| suggestion.candidate == found) else {
+ let Some(sugg) = names.into_iter().find(|suggestion| suggestion.candidate == found)
+ else {
return TypoCandidate::None;
};
if found == name {
@@ -1952,9 +1990,9 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
if !suggestable_variants.is_empty() {
let msg = if non_suggestable_variant_count == 0 && suggestable_variants.len() == 1 {
- format!("try {} the enum's variant", source_msg)
+ format!("try {source_msg} the enum's variant")
} else {
- format!("try {} one of the enum's variants", source_msg)
+ format!("try {source_msg} one of the enum's variants")
};
err.span_suggestions(
@@ -1967,19 +2005,15 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
// If the enum has no tuple variants..
if non_suggestable_variant_count == variants.len() {
- err.help(format!("the enum has no tuple variants {}", source_msg));
+ err.help(format!("the enum has no tuple variants {source_msg}"));
}
// If there are also non-tuple variants..
if non_suggestable_variant_count == 1 {
- err.help(format!(
- "you might have meant {} the enum's non-tuple variant",
- source_msg
- ));
+ err.help(format!("you might have meant {source_msg} the enum's non-tuple variant"));
} else if non_suggestable_variant_count >= 1 {
err.help(format!(
- "you might have meant {} one of the enum's non-tuple variants",
- source_msg
+ "you might have meant {source_msg} one of the enum's non-tuple variants"
));
}
} else {
@@ -1999,7 +2033,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
.map(|(variant, _, kind)| (path_names_to_string(variant), kind))
.map(|(variant, kind)| match kind {
CtorKind::Const => variant,
- CtorKind::Fn => format!("({}())", variant),
+ CtorKind::Fn => format!("({variant}())"),
})
.collect::<Vec<_>>();
let no_suggestable_variant = suggestable_variants.is_empty();
@@ -2024,7 +2058,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
.filter(|(_, def_id, kind)| needs_placeholder(*def_id, *kind))
.map(|(variant, _, kind)| (path_names_to_string(variant), kind))
.filter_map(|(variant, kind)| match kind {
- CtorKind::Fn => Some(format!("({}(/* fields */))", variant)),
+ CtorKind::Fn => Some(format!("({variant}(/* fields */))")),
_ => None,
})
.collect::<Vec<_>>();
@@ -2306,13 +2340,20 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
let mut should_continue = true;
match rib.kind {
LifetimeRibKind::Generics { binder: _, span, kind } => {
+ // Avoid suggesting placing lifetime parameters on constant items unless the relevant
+ // feature is enabled. Suggest the parent item as a possible location if applicable.
+ if let LifetimeBinderKind::ConstItem = kind
+ && !self.r.tcx().features().generic_const_items
+ {
+ continue;
+ }
+
if !span.can_be_used_for_suggestions() && suggest_note && let Some(name) = name {
suggest_note = false; // Avoid displaying the same help multiple times.
err.span_label(
span,
format!(
- "lifetime `{}` is missing in item created through this procedural macro",
- name,
+ "lifetime `{name}` is missing in item created through this procedural macro",
),
);
continue;
@@ -2356,14 +2397,14 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
);
} else if let Some(name) = name {
let message =
- Cow::from(format!("consider introducing lifetime `{}` here", name));
+ Cow::from(format!("consider introducing lifetime `{name}` here"));
should_continue = suggest(err, false, span, message, sugg);
} else {
let message = Cow::from("consider introducing a named lifetime parameter");
should_continue = suggest(err, false, span, message, sugg);
}
}
- LifetimeRibKind::Item => break,
+ LifetimeRibKind::Item | LifetimeRibKind::ConstParamTy => break,
_ => {}
}
if !should_continue {
@@ -2469,7 +2510,9 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
.lifetime_ribs
.iter()
.rev()
- .take_while(|rib| !matches!(rib.kind, LifetimeRibKind::Item))
+ .take_while(|rib| {
+ !matches!(rib.kind, LifetimeRibKind::Item | LifetimeRibKind::ConstParamTy)
+ })
.flat_map(|rib| rib.bindings.iter())
.map(|(&ident, &res)| (ident, res))
.filter(|(ident, _)| ident.name != kw::UnderscoreLifetime)
@@ -2500,7 +2543,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
}
let help_name = if let Some(ident) = ident {
- format!("`{}`", ident)
+ format!("`{ident}`")
} else {
format!("argument {}", index + 1)
};
@@ -2508,7 +2551,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
if lifetime_count == 1 {
m.push_str(&help_name[..])
} else {
- m.push_str(&format!("one of {}'s {} lifetimes", help_name, lifetime_count)[..])
+ m.push_str(&format!("one of {help_name}'s {lifetime_count} lifetimes")[..])
}
}
@@ -2538,14 +2581,12 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
} else if num_params == 1 {
err.help(format!(
"this function's return type contains a borrowed value, \
- but the signature does not say which {} it is borrowed from",
- m
+ but the signature does not say which {m} it is borrowed from"
));
} else {
err.help(format!(
"this function's return type contains a borrowed value, \
- but the signature does not say whether it is borrowed from {}",
- m
+ but the signature does not say whether it is borrowed from {m}"
));
}
}
@@ -2564,7 +2605,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
}
MissingLifetimeKind::Ampersand => {
debug_assert_eq!(lt.count, 1);
- (lt.span.shrink_to_hi(), format!("{} ", existing_name))
+ (lt.span.shrink_to_hi(), format!("{existing_name} "))
}
MissingLifetimeKind::Comma => {
let sugg: String = std::iter::repeat([existing_name.as_str(), ", "])
@@ -2611,7 +2652,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
}
1 => {
err.multipart_suggestion_verbose(
- format!("consider using the `{}` lifetime", existing_name),
+ format!("consider using the `{existing_name}` lifetime"),
spans_suggs,
Applicability::MaybeIncorrect,
);
@@ -2649,7 +2690,9 @@ fn mk_where_bound_predicate(
use rustc_span::DUMMY_SP;
let modified_segments = {
let mut segments = path.segments.clone();
- let [preceding @ .., second_last, last] = segments.as_mut_slice() else { return None; };
+ let [preceding @ .., second_last, last] = segments.as_mut_slice() else {
+ return None;
+ };
let mut segments = ThinVec::from(preceding);
let added_constraint = ast::AngleBracketedArg::Constraint(ast::AssocConstraint {
@@ -2726,9 +2769,9 @@ pub(super) fn signal_label_shadowing(sess: &Session, orig: Span, shadower: Ident
let shadower = shadower.span;
let mut err = sess.struct_span_warn(
shadower,
- format!("label name `{}` shadows a label name that is already in scope", name),
+ format!("label name `{name}` shadows a label name that is already in scope"),
);
err.span_label(orig, "first declared here");
- err.span_label(shadower, format!("label `{}` already in scope", name));
+ err.span_label(shadower, format!("label `{name}` already in scope"));
err.emit();
}
diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs
index da3d86a47..76e54e60d 100644
--- a/compiler/rustc_resolve/src/lib.rs
+++ b/compiler/rustc_resolve/src/lib.rs
@@ -18,6 +18,7 @@
#![recursion_limit = "256"]
#![allow(rustdoc::private_intra_doc_links)]
#![allow(rustc::potential_query_instability)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate tracing;
@@ -658,6 +659,7 @@ impl<'a> fmt::Debug for Module<'a> {
struct NameBindingData<'a> {
kind: NameBindingKind<'a>,
ambiguity: Option<(NameBinding<'a>, AmbiguityKind)>,
+ warn_ambiguity: bool,
expansion: LocalExpnId,
span: Span,
vis: ty::Visibility<DefId>,
@@ -767,6 +769,7 @@ struct AmbiguityError<'a> {
b2: NameBinding<'a>,
misc1: AmbiguityErrorMisc,
misc2: AmbiguityErrorMisc,
+ warning: bool,
}
impl<'a> NameBindingData<'a> {
@@ -794,6 +797,14 @@ impl<'a> NameBindingData<'a> {
}
}
+ fn is_warn_ambiguity(&self) -> bool {
+ self.warn_ambiguity
+ || match self.kind {
+ NameBindingKind::Import { binding, .. } => binding.is_warn_ambiguity(),
+ _ => false,
+ }
+ }
+
fn is_possibly_imported_variant(&self) -> bool {
match self.kind {
NameBindingKind::Import { binding, .. } => binding.is_possibly_imported_variant(),
@@ -1158,7 +1169,7 @@ impl<'tcx> Resolver<'_, 'tcx> {
}
fn local_def_id(&self, node: NodeId) -> LocalDefId {
- self.opt_local_def_id(node).unwrap_or_else(|| panic!("no entry for node id: `{:?}`", node))
+ self.opt_local_def_id(node).unwrap_or_else(|| panic!("no entry for node id: `{node:?}`"))
}
/// Adds a definition with a parent definition.
@@ -1271,7 +1282,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let registered_tools = tcx.registered_tools(());
- let features = tcx.sess.features_untracked();
+ let features = tcx.features();
let mut resolver = Resolver {
tcx,
@@ -1322,6 +1333,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
dummy_binding: arenas.alloc_name_binding(NameBindingData {
kind: NameBindingKind::Res(Res::Err),
ambiguity: None,
+ warn_ambiguity: false,
expansion: LocalExpnId::ROOT,
span: DUMMY_SP,
vis: ty::Visibility::Public,
@@ -1685,6 +1697,16 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
fn record_use(&mut self, ident: Ident, used_binding: NameBinding<'a>, is_lexical_scope: bool) {
+ self.record_use_inner(ident, used_binding, is_lexical_scope, used_binding.warn_ambiguity);
+ }
+
+ fn record_use_inner(
+ &mut self,
+ ident: Ident,
+ used_binding: NameBinding<'a>,
+ is_lexical_scope: bool,
+ warn_ambiguity: bool,
+ ) {
if let Some((b2, kind)) = used_binding.ambiguity {
let ambiguity_error = AmbiguityError {
kind,
@@ -1693,9 +1715,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
b2,
misc1: AmbiguityErrorMisc::None,
misc2: AmbiguityErrorMisc::None,
+ warning: warn_ambiguity,
};
if !self.matches_previous_ambiguity_error(&ambiguity_error) {
- // avoid duplicated span information to be emitt out
+ // avoid emitting duplicated span information
self.ambiguity_errors.push(ambiguity_error);
}
}
@@ -1715,7 +1738,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
self.used_imports.insert(id);
}
self.add_to_glob_map(import, ident);
- self.record_use(ident, binding, false);
+ self.record_use_inner(ident, binding, false, warn_ambiguity || binding.warn_ambiguity);
}
}
@@ -1812,7 +1835,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
fn record_partial_res(&mut self, node_id: NodeId, resolution: PartialRes) {
debug!("(recording res) recording {:?} for {}", resolution, node_id);
if let Some(prev_res) = self.partial_res_map.insert(node_id, resolution) {
- panic!("path resolved multiple times ({:?} before, {:?} now)", prev_res, resolution);
+ panic!("path resolved multiple times ({prev_res:?} before, {resolution:?} now)");
}
}
@@ -1871,7 +1894,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
} else {
let crate_id = if finalize {
let Some(crate_id) =
- self.crate_loader(|c| c.process_path_extern(ident.name, ident.span)) else { return Some(self.dummy_binding); };
+ self.crate_loader(|c| c.process_path_extern(ident.name, ident.span))
+ else {
+ return Some(self.dummy_binding);
+ };
crate_id
} else {
self.crate_loader(|c| c.maybe_process_path_extern(ident.name))?
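Several hunks in lib.rs only reflow `let ... else` bindings onto multiple lines, matching the formatting rustfmt now applies to the stabilized construct. A self-contained sketch of the construct itself (the function and data are illustrative, not taken from the resolver):

fn first_even(values: &[i32]) -> Option<i32> {
    // `let ... else` binds the pattern on success and must diverge otherwise.
    let Some(&found) = values.iter().find(|v| *v % 2 == 0) else {
        return None;
    };
    Some(found)
}

fn main() {
    assert_eq!(first_even(&[1, 3, 4]), Some(4));
    assert_eq!(first_even(&[1, 3, 5]), None);
}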
diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs
index d16b7902f..6a5b675b4 100644
--- a/compiler/rustc_resolve/src/macros.rs
+++ b/compiler/rustc_resolve/src/macros.rs
@@ -24,7 +24,9 @@ use rustc_hir::def_id::{CrateNum, LocalDefId};
use rustc_middle::middle::stability;
use rustc_middle::ty::RegisteredTools;
use rustc_middle::ty::TyCtxt;
-use rustc_session::lint::builtin::{LEGACY_DERIVE_HELPERS, SOFT_UNSTABLE};
+use rustc_session::lint::builtin::{
+ LEGACY_DERIVE_HELPERS, SOFT_UNSTABLE, UNKNOWN_DIAGNOSTIC_ATTRIBUTES,
+};
use rustc_session::lint::builtin::{UNUSED_MACROS, UNUSED_MACRO_RULES};
use rustc_session::lint::BuiltinLintDiagnostics;
use rustc_session::parse::feature_err;
@@ -140,9 +142,9 @@ pub(crate) fn registered_tools(tcx: TyCtxt<'_>, (): ()) -> RegisteredTools {
}
}
}
- // We implicitly add `rustfmt` and `clippy` to known tools,
+ // We implicitly add `rustfmt`, `clippy`, and `diagnostic` to known tools,
// but it's not an error to register them explicitly.
- let predefined_tools = [sym::clippy, sym::rustfmt];
+ let predefined_tools = [sym::clippy, sym::rustfmt, sym::diagnostic];
registered_tools.extend(predefined_tools.iter().cloned().map(Ident::with_dummy_span));
registered_tools
}
@@ -205,7 +207,7 @@ impl<'a, 'tcx> ResolverExpand for Resolver<'a, 'tcx> {
self.tcx
.sess
.diagnostic()
- .bug(format!("built-in macro `{}` was already registered", name));
+ .bug(format!("built-in macro `{name}` was already registered"));
}
}
@@ -568,7 +570,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
let mut err = self.tcx.sess.create_err(err);
- err.span_label(path.span, format!("not {} {}", article, expected));
+ err.span_label(path.span, format!("not {article} {expected}"));
err.emit();
@@ -576,10 +578,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
// We are trying to avoid reporting this error if other related errors were reported.
- if res != Res::Err
- && inner_attr
- && !self.tcx.sess.features_untracked().custom_inner_attributes
- {
+ if res != Res::Err && inner_attr && !self.tcx.features().custom_inner_attributes {
let msg = match res {
Res::Def(..) => "inner macro attributes are unstable",
Res::NonMacroAttr(..) => "custom inner attributes are unstable",
@@ -598,6 +597,18 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
}
+ if res == Res::NonMacroAttr(NonMacroAttrKind::Tool)
+ && path.segments.len() >= 2
+ && path.segments[0].ident.name == sym::diagnostic
+ {
+ self.tcx.sess.parse_sess.buffer_lint(
+ UNKNOWN_DIAGNOSTIC_ATTRIBUTES,
+ path.segments[1].span(),
+ node_id,
+ "unknown diagnostic attribute",
+ );
+ }
+
Ok((ext, res))
}
@@ -895,7 +906,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if macro_kind.is_some() && sub_namespace_match(macro_kind, Some(MacroKind::Attr)) {
self.tcx.sess.span_err(
ident.span,
- format!("name `{}` is reserved in attribute namespace", ident),
+ format!("name `{ident}` is reserved in attribute namespace"),
);
}
}
@@ -909,7 +920,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
item: &ast::Item,
edition: Edition,
) -> (SyntaxExtension, Vec<(usize, Span)>) {
- let (mut result, mut rule_spans) = compile_declarative_macro(self.tcx.sess, item, edition);
+ let (mut result, mut rule_spans) =
+ compile_declarative_macro(self.tcx.sess, self.tcx.features(), item, edition);
if let Some(builtin_name) = result.builtin_name {
// The macro was marked with `#[rustc_builtin_macro]`.
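The macros.rs change registers `diagnostic` as a predefined tool namespace and buffers the new `unknown diagnostic attribute` lint for attributes under it that the compiler does not recognize. A hedged user-side sketch of what triggers that lint; the attribute name is made up, and depending on the toolchain the namespace itself may still sit behind the unstable `diagnostic_namespace` feature:

// `does_not_exist` is a hypothetical attribute: the compiler warns
// "unknown diagnostic attribute" instead of rejecting the program.
#[diagnostic::does_not_exist]
trait Marker {}

fn main() {}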
diff --git a/compiler/rustc_resolve/src/rustdoc.rs b/compiler/rustc_resolve/src/rustdoc.rs
index d433391f2..ba7417b6d 100644
--- a/compiler/rustc_resolve/src/rustdoc.rs
+++ b/compiler/rustc_resolve/src/rustdoc.rs
@@ -1,4 +1,4 @@
-use pulldown_cmark::{BrokenLink, Event, LinkType, Options, Parser, Tag};
+use pulldown_cmark::{BrokenLink, CowStr, Event, LinkType, Options, Parser, Tag};
use rustc_ast as ast;
use rustc_ast::util::comments::beautify_doc_string;
use rustc_data_structures::fx::FxHashMap;
@@ -392,16 +392,73 @@ pub(crate) fn attrs_to_preprocessed_links(attrs: &[ast::Attribute]) -> Vec<Box<s
let (doc_fragments, _) = attrs_to_doc_fragments(attrs.iter().map(|attr| (attr, None)), true);
let doc = prepare_to_doc_link_resolution(&doc_fragments).into_values().next().unwrap();
- Parser::new_with_broken_link_callback(
+ parse_links(&doc)
+}
+
+/// Similar version of `markdown_links` from rustdoc.
+/// This will collect destination links and display text if it exists.
+fn parse_links<'md>(doc: &'md str) -> Vec<Box<str>> {
+ let mut broken_link_callback = |link: BrokenLink<'md>| Some((link.reference, "".into()));
+ let mut event_iter = Parser::new_with_broken_link_callback(
&doc,
main_body_opts(),
- Some(&mut |link: BrokenLink<'_>| Some((link.reference, "".into()))),
+ Some(&mut broken_link_callback),
)
- .filter_map(|event| match event {
- Event::Start(Tag::Link(link_type, dest, _)) if may_be_doc_link(link_type) => {
- Some(preprocess_link(&dest))
+ .into_iter();
+ let mut links = Vec::new();
+
+ while let Some(event) = event_iter.next() {
+ match event {
+ Event::Start(Tag::Link(link_type, dest, _)) if may_be_doc_link(link_type) => {
+ if matches!(
+ link_type,
+ LinkType::Inline
+ | LinkType::ReferenceUnknown
+ | LinkType::Reference
+ | LinkType::Shortcut
+ | LinkType::ShortcutUnknown
+ ) {
+ if let Some(display_text) = collect_link_data(&mut event_iter) {
+ links.push(display_text);
+ }
+ }
+
+ links.push(preprocess_link(&dest));
+ }
+ _ => {}
+ }
+ }
+
+ links
+}
+
+/// Collects a link's display text, if any.
+fn collect_link_data<'input, 'callback>(
+ event_iter: &mut Parser<'input, 'callback>,
+) -> Option<Box<str>> {
+ let mut display_text: Option<String> = None;
+ let mut append_text = |text: CowStr<'_>| {
+ if let Some(display_text) = &mut display_text {
+ display_text.push_str(&text);
+ } else {
+ display_text = Some(text.to_string());
+ }
+ };
+
+ while let Some(event) = event_iter.next() {
+ match event {
+ Event::Text(text) => {
+ append_text(text);
+ }
+ Event::Code(code) => {
+ append_text(code);
+ }
+ Event::End(_) => {
+ break;
+ }
+ _ => {}
}
- _ => None,
- })
- .collect()
+ }
+
+ display_text.map(String::into_boxed_str)
}
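The new `parse_links` / `collect_link_data` pair walks pulldown-cmark events to gather both link destinations and their display text. A reduced standalone sketch of the same event-walking idea, assuming pulldown-cmark 0.9 (the crate version whose `Tag::Link(link_type, dest, title)` shape the code above destructures):

use pulldown_cmark::{Event, Parser, Tag};

/// Collect the destination of every inline link in a Markdown string.
fn link_destinations(doc: &str) -> Vec<String> {
    Parser::new(doc)
        .filter_map(|event| match event {
            Event::Start(Tag::Link(_link_type, dest, _title)) => Some(dest.to_string()),
            _ => None,
        })
        .collect()
}

fn main() {
    let doc = "See [the guide](https://example.com/guide) and [other notes](notes.md).";
    assert_eq!(link_destinations(doc), vec!["https://example.com/guide", "notes.md"]);
}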
diff --git a/compiler/rustc_session/Cargo.toml b/compiler/rustc_session/Cargo.toml
index 1291d1454..e26d25d9a 100644
--- a/compiler/rustc_session/Cargo.toml
+++ b/compiler/rustc_session/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[dependencies]
-atty = "0.2.13"
bitflags = "1.2.1"
getopts = "0.2"
rustc_macros = { path = "../rustc_macros" }
diff --git a/compiler/rustc_session/messages.ftl b/compiler/rustc_session/messages.ftl
index 4897bd8d5..b07c6db59 100644
--- a/compiler/rustc_session/messages.ftl
+++ b/compiler/rustc_session/messages.ftl
@@ -26,6 +26,8 @@ session_feature_gate_error = {$explain}
session_file_is_not_writeable = output file {$file} is not writeable -- check its permissions
+session_file_write_fail = failed to write `{$path}` due to error `{$err}`
+
session_hexadecimal_float_literal_not_supported = hexadecimal float literal is not supported
session_incompatible_linker_flavor = linker flavor `{$flavor}` is incompatible with the current target
@@ -43,6 +45,7 @@ session_int_literal_too_large = integer literal is too large
.note = value exceeds limit of `{$limit}`
session_invalid_character_in_create_name = invalid character `{$character}` in crate name: `{$crate_name}`
+session_invalid_character_in_create_name_help = you can either pass `--crate-name` on the command line or add `#![crate_name="…"]` to set the crate name
session_invalid_float_literal_suffix = invalid suffix `{$suffix}` for float literal
.label = invalid suffix `{$suffix}`
@@ -86,7 +89,9 @@ session_sanitizer_cfi_generalize_pointers_requires_cfi = `-Zsanitizer-cfi-genera
session_sanitizer_cfi_normalize_integers_requires_cfi = `-Zsanitizer-cfi-normalize-integers` requires `-Zsanitizer=cfi` or `-Zsanitizer=kcfi`
-session_sanitizer_cfi_requires_lto = `-Zsanitizer=cfi` requires `-Clto`, `-Clto=thin`, or `-Clinker-plugin-lto`
+session_sanitizer_cfi_requires_lto = `-Zsanitizer=cfi` requires `-Clto` or `-Clinker-plugin-lto`
+
+session_sanitizer_cfi_requires_single_codegen_unit = `-Zsanitizer=cfi` with `-Clto` requires `-Ccodegen-units=1`
session_sanitizer_not_supported = {$us} sanitizer is not supported for this target
diff --git a/compiler/rustc_session/src/code_stats.rs b/compiler/rustc_session/src/code_stats.rs
index cabe1c96b..df81e1f83 100644
--- a/compiler/rustc_session/src/code_stats.rs
+++ b/compiler/rustc_session/src/code_stats.rs
@@ -227,10 +227,8 @@ impl CodeStats {
}
pub fn print_vtable_sizes(&self, crate_name: &str) {
- let mut infos = std::mem::take(&mut *self.vtable_sizes.lock())
- .into_iter()
- .map(|(_did, stats)| stats)
- .collect::<Vec<_>>();
+ let mut infos =
+ std::mem::take(&mut *self.vtable_sizes.lock()).into_values().collect::<Vec<_>>();
// Primary sort: cost % in reverse order (from largest to smallest)
// Secondary sort: trait_name
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index f97cb3440..f00472f18 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -3,6 +3,7 @@
pub use crate::options::*;
+use crate::errors::FileWriteFail;
use crate::search_paths::SearchPath;
use crate::utils::{CanonicalizedPath, NativeLib, NativeLibKind};
use crate::{lint, HashStableContext};
@@ -11,7 +12,7 @@ use crate::{EarlyErrorHandler, Session};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{StableOrd, ToStableHashKey};
use rustc_target::abi::Align;
-use rustc_target::spec::{PanicStrategy, SanitizerSet, SplitDebuginfo};
+use rustc_target::spec::{PanicStrategy, RelocModel, SanitizerSet, SplitDebuginfo};
use rustc_target::spec::{Target, TargetTriple, TargetWarnings, TARGETS};
use crate::parse::{CrateCheckConfig, CrateConfig};
@@ -31,6 +32,7 @@ use std::collections::btree_map::{
use std::collections::{BTreeMap, BTreeSet};
use std::ffi::OsStr;
use std::fmt;
+use std::fs;
use std::hash::Hash;
use std::iter;
use std::path::{Path, PathBuf};
@@ -277,11 +279,11 @@ impl LinkSelfContained {
// set of all values like `y` or `n` used to be. Therefore, if this flag had previously been
// set in bulk with its historical values, then manually setting a component clears that
// `explicitly_set` state.
- if let Some(component_to_enable) = component.strip_prefix("+") {
+ if let Some(component_to_enable) = component.strip_prefix('+') {
self.explicitly_set = None;
self.components.insert(component_to_enable.parse()?);
Ok(())
- } else if let Some(component_to_disable) = component.strip_prefix("-") {
+ } else if let Some(component_to_disable) = component.strip_prefix('-') {
self.explicitly_set = None;
self.components.remove(component_to_disable.parse()?);
Ok(())
@@ -710,8 +712,14 @@ impl ExternEntry {
}
}
+#[derive(Clone, PartialEq, Debug)]
+pub struct PrintRequest {
+ pub kind: PrintKind,
+ pub out: OutFileName,
+}
+
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum PrintRequest {
+pub enum PrintKind {
FileNames,
Sysroot,
TargetLibdir,
@@ -826,9 +834,10 @@ impl OutFileName {
}
pub fn is_tty(&self) -> bool {
+ use std::io::IsTerminal;
match *self {
OutFileName::Real(_) => false,
- OutFileName::Stdout => atty::is(atty::Stream::Stdout),
+ OutFileName::Stdout => std::io::stdout().is_terminal(),
}
}
@@ -855,6 +864,17 @@ impl OutFileName {
OutFileName::Stdout => outputs.temp_path(flavor, codegen_unit_name),
}
}
+
+ pub fn overwrite(&self, content: &str, sess: &Session) {
+ match self {
+ OutFileName::Stdout => print!("{content}"),
+ OutFileName::Real(path) => {
+ if let Err(e) = fs::write(path, content) {
+ sess.emit_fatal(FileWriteFail { path, err: e.to_string() });
+ }
+ }
+ }
+ }
}
#[derive(Clone, Hash, Debug, HashStable_Generic)]
@@ -1173,6 +1193,7 @@ fn default_configuration(sess: &Session) -> CrateConfig {
let os = &sess.target.os;
let env = &sess.target.env;
let abi = &sess.target.abi;
+ let relocation_model = sess.target.relocation_model.desc_symbol();
let vendor = &sess.target.vendor;
let min_atomic_width = sess.target.min_atomic_width();
let max_atomic_width = sess.target.max_atomic_width();
@@ -1198,6 +1219,9 @@ fn default_configuration(sess: &Session) -> CrateConfig {
ret.insert((sym::target_pointer_width, Some(Symbol::intern(&wordsz))));
ret.insert((sym::target_env, Some(Symbol::intern(env))));
ret.insert((sym::target_abi, Some(Symbol::intern(abi))));
+ if sess.is_nightly_build() {
+ ret.insert((sym::relocation_model, Some(relocation_model)));
+ }
ret.insert((sym::target_vendor, Some(Symbol::intern(vendor))));
if sess.target.has_thread_local {
ret.insert((sym::target_thread_local, None));
@@ -1395,6 +1419,8 @@ impl CrateCheckConfig {
.into_iter()
.map(|sanitizer| Symbol::intern(sanitizer.as_str().unwrap()));
+ let relocation_model_values = RelocModel::all();
+
// Unknown possible values:
// - `feature`
// - `target_feature`
@@ -1433,6 +1459,10 @@ impl CrateCheckConfig {
.entry(sym::target_has_atomic_equal_alignment)
.or_insert_with(no_values)
.extend(atomic_values);
+ self.expecteds
+ .entry(sym::relocation_model)
+ .or_insert_with(empty_values)
+ .extend(relocation_model_values);
// Target specific values
{
@@ -1519,9 +1549,8 @@ pub(super) fn build_target_config(
);
let (target, target_warnings) = target_result.unwrap_or_else(|e| {
handler.early_error(format!(
- "Error loading target specification: {}. \
- Run `rustc --print target-list` for a list of built-in targets",
- e
+ "Error loading target specification: {e}. \
+ Run `rustc --print target-list` for a list of built-in targets"
))
});
for warning in target_warnings.warning_messages() {
@@ -1958,8 +1987,7 @@ pub fn parse_crate_edition(handler: &EarlyErrorHandler, matches: &getopts::Match
let is_nightly = nightly_options::match_is_nightly_build(matches);
let msg = if !is_nightly {
format!(
- "the crate requires edition {}, but the latest edition supported by this Rust version is {}",
- edition, LATEST_STABLE_EDITION
+ "the crate requires edition {edition}, but the latest edition supported by this Rust version is {LATEST_STABLE_EDITION}"
)
} else {
format!("edition {edition} is unstable and only available with -Z unstable-options")
@@ -2005,13 +2033,7 @@ fn parse_output_types(
if !unstable_opts.parse_only {
for list in matches.opt_strs("emit") {
for output_type in list.split(',') {
- let (shorthand, path) = match output_type.split_once('=') {
- None => (output_type, None),
- Some((shorthand, "-")) => (shorthand, Some(OutFileName::Stdout)),
- Some((shorthand, path)) => {
- (shorthand, Some(OutFileName::Real(PathBuf::from(path))))
- }
- };
+ let (shorthand, path) = split_out_file_name(output_type);
let output_type = OutputType::from_shorthand(shorthand).unwrap_or_else(|| {
handler.early_error(format!(
"unknown emission type: `{shorthand}` - expected one of: {display}",
@@ -2028,6 +2050,14 @@ fn parse_output_types(
OutputTypes(output_types)
}
+fn split_out_file_name(arg: &str) -> (&str, Option<OutFileName>) {
+ match arg.split_once('=') {
+ None => (arg, None),
+ Some((kind, "-")) => (kind, Some(OutFileName::Stdout)),
+ Some((kind, path)) => (kind, Some(OutFileName::Real(PathBuf::from(path)))),
+ }
+}
+
fn should_override_cgus_and_disable_thinlto(
handler: &EarlyErrorHandler,
output_types: &OutputTypes,
@@ -2091,41 +2121,55 @@ fn collect_print_requests(
) -> Vec<PrintRequest> {
let mut prints = Vec::<PrintRequest>::new();
if cg.target_cpu.as_ref().is_some_and(|s| s == "help") {
- prints.push(PrintRequest::TargetCPUs);
+ prints.push(PrintRequest { kind: PrintKind::TargetCPUs, out: OutFileName::Stdout });
cg.target_cpu = None;
};
if cg.target_feature == "help" {
- prints.push(PrintRequest::TargetFeatures);
+ prints.push(PrintRequest { kind: PrintKind::TargetFeatures, out: OutFileName::Stdout });
cg.target_feature = String::new();
}
- const PRINT_REQUESTS: &[(&str, PrintRequest)] = &[
- ("crate-name", PrintRequest::CrateName),
- ("file-names", PrintRequest::FileNames),
- ("sysroot", PrintRequest::Sysroot),
- ("target-libdir", PrintRequest::TargetLibdir),
- ("cfg", PrintRequest::Cfg),
- ("calling-conventions", PrintRequest::CallingConventions),
- ("target-list", PrintRequest::TargetList),
- ("target-cpus", PrintRequest::TargetCPUs),
- ("target-features", PrintRequest::TargetFeatures),
- ("relocation-models", PrintRequest::RelocationModels),
- ("code-models", PrintRequest::CodeModels),
- ("tls-models", PrintRequest::TlsModels),
- ("native-static-libs", PrintRequest::NativeStaticLibs),
- ("stack-protector-strategies", PrintRequest::StackProtectorStrategies),
- ("target-spec-json", PrintRequest::TargetSpec),
- ("all-target-specs-json", PrintRequest::AllTargetSpecs),
- ("link-args", PrintRequest::LinkArgs),
- ("split-debuginfo", PrintRequest::SplitDebuginfo),
- ("deployment-target", PrintRequest::DeploymentTarget),
+ const PRINT_KINDS: &[(&str, PrintKind)] = &[
+ ("crate-name", PrintKind::CrateName),
+ ("file-names", PrintKind::FileNames),
+ ("sysroot", PrintKind::Sysroot),
+ ("target-libdir", PrintKind::TargetLibdir),
+ ("cfg", PrintKind::Cfg),
+ ("calling-conventions", PrintKind::CallingConventions),
+ ("target-list", PrintKind::TargetList),
+ ("target-cpus", PrintKind::TargetCPUs),
+ ("target-features", PrintKind::TargetFeatures),
+ ("relocation-models", PrintKind::RelocationModels),
+ ("code-models", PrintKind::CodeModels),
+ ("tls-models", PrintKind::TlsModels),
+ ("native-static-libs", PrintKind::NativeStaticLibs),
+ ("stack-protector-strategies", PrintKind::StackProtectorStrategies),
+ ("target-spec-json", PrintKind::TargetSpec),
+ ("all-target-specs-json", PrintKind::AllTargetSpecs),
+ ("link-args", PrintKind::LinkArgs),
+ ("split-debuginfo", PrintKind::SplitDebuginfo),
+ ("deployment-target", PrintKind::DeploymentTarget),
];
+ // We disallow reusing the same path in multiple prints, such as `--print
+ // cfg=output.txt --print link-args=output.txt`, because outputs are printed
+ // by disparate pieces of the compiler, and keeping track of which files
+ // need to be overwritten vs appended to is annoying.
+ let mut printed_paths = FxHashSet::default();
+
prints.extend(matches.opt_strs("print").into_iter().map(|req| {
- match PRINT_REQUESTS.iter().find(|&&(name, _)| name == req) {
- Some((_, PrintRequest::TargetSpec)) => {
+ let (req, out) = split_out_file_name(&req);
+
+ if out.is_some() && !unstable_opts.unstable_options {
+ handler.early_error(
+ "the `-Z unstable-options` flag must also be passed to \
+ enable the path print option",
+ );
+ }
+ let kind = match PRINT_KINDS.iter().find(|&&(name, _)| name == req) {
+ Some((_, PrintKind::TargetSpec)) => {
if unstable_opts.unstable_options {
- PrintRequest::TargetSpec
+ PrintKind::TargetSpec
} else {
handler.early_error(
"the `-Z unstable-options` flag must also be passed to \
@@ -2133,9 +2177,9 @@ fn collect_print_requests(
);
}
}
- Some((_, PrintRequest::AllTargetSpecs)) => {
+ Some((_, PrintKind::AllTargetSpecs)) => {
if unstable_opts.unstable_options {
- PrintRequest::AllTargetSpecs
+ PrintKind::AllTargetSpecs
} else {
handler.early_error(
"the `-Z unstable-options` flag must also be passed to \
@@ -2143,16 +2187,28 @@ fn collect_print_requests(
);
}
}
- Some(&(_, print_request)) => print_request,
+ Some(&(_, print_kind)) => print_kind,
None => {
let prints =
- PRINT_REQUESTS.iter().map(|(name, _)| format!("`{name}`")).collect::<Vec<_>>();
+ PRINT_KINDS.iter().map(|(name, _)| format!("`{name}`")).collect::<Vec<_>>();
let prints = prints.join(", ");
handler.early_error(format!(
"unknown print request `{req}`. Valid print requests are: {prints}"
));
}
+ };
+
+ let out = out.unwrap_or(OutFileName::Stdout);
+ if let OutFileName::Real(path) = &out {
+ if !printed_paths.insert(path.clone()) {
+ handler.early_error(format!(
+ "cannot print multiple outputs to the same path: {}",
+ path.display(),
+ ));
+ }
}
+
+ PrintRequest { kind, out }
}));
prints
@@ -2524,6 +2580,8 @@ pub fn build_session_options(
let error_format = parse_error_format(handler, matches, color, json_rendered);
+ handler.abort_if_error_and_set_error_format(error_format);
+
let diagnostic_width = matches.opt_get("diagnostic-width").unwrap_or_else(|_| {
handler.early_error("`--diagnostic-width` must be a positive integer");
});
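Two independent pieces in config.rs are worth calling out: the `atty` crate is dropped in favour of the standard library's `IsTerminal` trait (stable since Rust 1.70), and `--print` requests now carry an output destination. A minimal sketch of the terminal check outside the compiler:

use std::io::IsTerminal;

fn main() {
    // Replacement for the old `atty::is(atty::Stream::Stdout)` check.
    if std::io::stdout().is_terminal() {
        println!("stdout is attached to a terminal");
    } else {
        println!("stdout is piped or redirected");
    }
}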
diff --git a/compiler/rustc_session/src/cstore.rs b/compiler/rustc_session/src/cstore.rs
index dc475e8c6..c53a355b5 100644
--- a/compiler/rustc_session/src/cstore.rs
+++ b/compiler/rustc_session/src/cstore.rs
@@ -13,6 +13,7 @@ use rustc_hir::definitions::{DefKey, DefPath, DefPathHash, Definitions};
use rustc_span::hygiene::{ExpnHash, ExpnId};
use rustc_span::symbol::Symbol;
use rustc_span::Span;
+use rustc_target::spec::abi::Abi;
use rustc_target::spec::Target;
use std::any::Any;
@@ -147,6 +148,7 @@ pub enum DllCallingConvention {
pub struct ForeignModule {
pub foreign_items: Vec<DefId>,
pub def_id: DefId,
+ pub abi: Abi,
}
#[derive(Copy, Clone, Debug, HashStable_Generic)]
diff --git a/compiler/rustc_session/src/errors.rs b/compiler/rustc_session/src/errors.rs
index 4a3e668da..78940462b 100644
--- a/compiler/rustc_session/src/errors.rs
+++ b/compiler/rustc_session/src/errors.rs
@@ -115,6 +115,10 @@ pub struct CannotEnableCrtStaticLinux;
pub struct SanitizerCfiRequiresLto;
#[derive(Diagnostic)]
+#[diag(session_sanitizer_cfi_requires_single_codegen_unit)]
+pub struct SanitizerCfiRequiresSingleCodegenUnit;
+
+#[derive(Diagnostic)]
#[diag(session_sanitizer_cfi_canonical_jump_tables_requires_cfi)]
pub struct SanitizerCfiCanonicalJumpTablesRequiresCfi;
@@ -164,6 +168,13 @@ pub struct FileIsNotWriteable<'a> {
}
#[derive(Diagnostic)]
+#[diag(session_file_write_fail)]
+pub(crate) struct FileWriteFail<'a> {
+ pub path: &'a std::path::Path,
+ pub err: String,
+}
+
+#[derive(Diagnostic)]
#[diag(session_crate_name_does_not_match)]
pub struct CrateNameDoesNotMatch {
#[primary_span]
@@ -192,6 +203,14 @@ pub struct InvalidCharacterInCrateName {
pub span: Option<Span>,
pub character: char,
pub crate_name: Symbol,
+ #[subdiagnostic]
+ pub crate_name_help: Option<InvalidCrateNameHelp>,
+}
+
+#[derive(Subdiagnostic)]
+pub enum InvalidCrateNameHelp {
+ #[help(session_invalid_character_in_create_name_help)]
+ AddCrateName,
}
#[derive(Subdiagnostic)]
diff --git a/compiler/rustc_session/src/lib.rs b/compiler/rustc_session/src/lib.rs
index d57aa820f..a270817f3 100644
--- a/compiler/rustc_session/src/lib.rs
+++ b/compiler/rustc_session/src/lib.rs
@@ -10,6 +10,7 @@
#![allow(rustc::potential_query_instability)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate rustc_macros;
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
index 7840a0ecf..055ab2d9c 100644
--- a/compiler/rustc_session/src/options.rs
+++ b/compiler/rustc_session/src/options.rs
@@ -330,8 +330,7 @@ fn build_options<O: Default>(
match value {
None => handler.early_error(
format!(
- "{0} option `{1}` requires {2} ({3} {1}=<value>)",
- outputname, key, type_desc, prefix
+ "{outputname} option `{key}` requires {type_desc} ({prefix} {key}=<value>)"
),
),
Some(value) => handler.early_error(
@@ -1145,7 +1144,7 @@ mod parse {
}
// 2. Parse a list of enabled and disabled components.
- for comp in s.split(",") {
+ for comp in s.split(',') {
if slot.handle_cli_component(comp).is_err() {
return false;
}
@@ -1433,8 +1432,6 @@ options! {
dep_tasks: bool = (false, parse_bool, [UNTRACKED],
"print tasks that execute and the color their dep node gets (requires debug build) \
(default: no)"),
- diagnostic_width: Option<usize> = (None, parse_opt_number, [UNTRACKED],
- "set the current output width for diagnostic truncation"),
dont_buffer_diagnostics: bool = (false, parse_bool, [UNTRACKED],
"emit diagnostics rather than buffering (breaks NLL error downgrading, sorting) \
(default: no)"),
@@ -1583,9 +1580,6 @@ options! {
"what location details should be tracked when using caller_location, either \
`none`, or a comma separated list of location details, for which \
valid options are `file`, `line`, and `column` (default: `file,line,column`)"),
- lower_impl_trait_in_trait_to_assoc_ty: bool = (false, parse_bool, [TRACKED],
- "modify the lowering strategy for `impl Trait` in traits so that they are lowered to \
- generic associated types"),
ls: bool = (false, parse_bool, [UNTRACKED],
"list the symbols defined by a library crate (default: no)"),
macro_backtrace: bool = (false, parse_bool, [UNTRACKED],
@@ -1671,6 +1665,9 @@ options! {
"use a more precise version of drop elaboration for matches on enums (default: yes). \
This results in better codegen, but has caused miscompilations on some tier 2 platforms. \
See #77382 and #74551."),
+ #[rustc_lint_opt_deny_field_access("use `Session::print_codegen_stats` instead of this field")]
+ print_codegen_stats: bool = (false, parse_bool, [UNTRACKED],
+ "print codegen statistics (default: no)"),
print_fuel: Option<String> = (None, parse_opt_string, [TRACKED],
"make rustc print the total optimization fuel used by a crate"),
print_llvm_passes: bool = (false, parse_bool, [UNTRACKED],
@@ -1878,10 +1875,13 @@ written to standard error output)"),
Requires `-Clto[=[fat,yes]]`"),
wasi_exec_model: Option<WasiExecModel> = (None, parse_wasi_exec_model, [TRACKED],
"whether to build a wasi command or reactor"),
+ write_long_types_to_disk: bool = (true, parse_bool, [UNTRACKED],
+ "whether long type names should be written to files instead of being printed in errors"),
// tidy-alphabetical-end
// If you add a new option, please update:
// - compiler/rustc_interface/src/tests.rs
+ // - src/doc/unstable-book/src/compiler-flags
}
#[derive(Clone, Hash, PartialEq, Eq, Debug)]
diff --git a/compiler/rustc_session/src/output.rs b/compiler/rustc_session/src/output.rs
index 2088744bc..c0884fb21 100644
--- a/compiler/rustc_session/src/output.rs
+++ b/compiler/rustc_session/src/output.rs
@@ -2,7 +2,7 @@
use crate::config::{CrateType, Input, OutFileName, OutputFilenames, OutputType};
use crate::errors::{
CrateNameDoesNotMatch, CrateNameEmpty, CrateNameInvalid, FileIsNotWriteable,
- InvalidCharacterInCrateName,
+ InvalidCharacterInCrateName, InvalidCrateNameHelp,
};
use crate::Session;
use rustc_ast::{self as ast, attr};
@@ -101,7 +101,16 @@ pub fn validate_crate_name(sess: &Session, s: Symbol, sp: Option<Span>) {
continue;
}
err_count += 1;
- sess.emit_err(InvalidCharacterInCrateName { span: sp, character: c, crate_name: s });
+ sess.emit_err(InvalidCharacterInCrateName {
+ span: sp,
+ character: c,
+ crate_name: s,
+ crate_name_help: if sp.is_none() {
+ Some(InvalidCrateNameHelp::AddCrateName)
+ } else {
+ None
+ },
+ });
}
}
diff --git a/compiler/rustc_session/src/parse.rs b/compiler/rustc_session/src/parse.rs
index 194f7201f..1cf63e9b7 100644
--- a/compiler/rustc_session/src/parse.rs
+++ b/compiler/rustc_session/src/parse.rs
@@ -8,8 +8,8 @@ use crate::lint::{
};
use rustc_ast::node_id::NodeId;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
-use rustc_data_structures::sync::{AppendOnlyVec, AtomicBool, Lock, Lrc};
-use rustc_errors::{emitter::SilentEmitter, ColorConfig, Handler};
+use rustc_data_structures::sync::{AppendOnlyVec, Lock, Lrc};
+use rustc_errors::{emitter::SilentEmitter, Handler};
use rustc_errors::{
fallback_fluent_bundle, Diagnostic, DiagnosticBuilder, DiagnosticId, DiagnosticMessage,
EmissionGuarantee, ErrorGuaranteed, IntoDiagnostic, MultiSpan, Noted, StashKey,
@@ -117,6 +117,7 @@ pub fn feature_err_issue(
/// Construct a future incompatibility diagnostic for a feature gate.
///
/// This diagnostic is only a warning and *does not cause compilation to fail*.
+#[track_caller]
pub fn feature_warn(sess: &ParseSess, feature: Symbol, span: Span, explain: &'static str) {
feature_warn_issue(sess, feature, span, GateIssue::Language, explain);
}
@@ -129,6 +130,7 @@ pub fn feature_warn(sess: &ParseSess, feature: Symbol, span: Span, explain: &'st
/// Almost always, you want to use this for a language feature. If so, prefer `feature_warn`.
#[allow(rustc::diagnostic_outside_of_impl)]
#[allow(rustc::untranslatable_diagnostic)]
+#[track_caller]
pub fn feature_warn_issue(
sess: &ParseSess,
feature: Symbol,
@@ -202,8 +204,6 @@ pub struct ParseSess {
pub ambiguous_block_expr_parse: Lock<FxHashMap<Span, Span>>,
pub gated_spans: GatedSpans,
pub symbol_gallery: SymbolGallery,
- /// The parser has reached `Eof` due to an unclosed brace. Used to silence unnecessary errors.
- pub reached_eof: AtomicBool,
/// Environment variables accessed during the build and their values when they exist.
pub env_depinfo: Lock<FxHashSet<(Symbol, Option<Symbol>)>>,
/// File paths accessed during the build.
@@ -222,14 +222,7 @@ impl ParseSess {
pub fn new(locale_resources: Vec<&'static str>, file_path_mapping: FilePathMapping) -> Self {
let fallback_bundle = fallback_fluent_bundle(locale_resources, false);
let sm = Lrc::new(SourceMap::new(file_path_mapping));
- let handler = Handler::with_tty_emitter(
- ColorConfig::Auto,
- true,
- None,
- Some(sm.clone()),
- None,
- fallback_bundle,
- );
+ let handler = Handler::with_tty_emitter(Some(sm.clone()), fallback_bundle);
ParseSess::with_span_handler(handler, sm)
}
@@ -247,7 +240,6 @@ impl ParseSess {
ambiguous_block_expr_parse: Lock::new(FxHashMap::default()),
gated_spans: GatedSpans::default(),
symbol_gallery: SymbolGallery::default(),
- reached_eof: AtomicBool::new(false),
env_depinfo: Default::default(),
file_depinfo: Default::default(),
assume_incomplete_release: false,
@@ -259,13 +251,9 @@ impl ParseSess {
pub fn with_silent_emitter(fatal_note: Option<String>) -> Self {
let fallback_bundle = fallback_fluent_bundle(Vec::new(), false);
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let fatal_handler =
- Handler::with_tty_emitter(ColorConfig::Auto, false, None, None, None, fallback_bundle);
- let handler = Handler::with_emitter(
- false,
- None,
- Box::new(SilentEmitter { fatal_handler, fatal_note }),
- );
+ let fatal_handler = Handler::with_tty_emitter(None, fallback_bundle).disable_warnings();
+ let handler = Handler::with_emitter(Box::new(SilentEmitter { fatal_handler, fatal_note }))
+ .disable_warnings();
ParseSess::with_span_handler(handler, sm)
}
@@ -351,6 +339,7 @@ impl ParseSess {
self.create_warning(warning).emit()
}
+ #[track_caller]
pub fn create_note<'a>(
&'a self,
note: impl IntoDiagnostic<'a, Noted>,
@@ -358,10 +347,12 @@ impl ParseSess {
note.into_diagnostic(&self.span_diagnostic)
}
+ #[track_caller]
pub fn emit_note<'a>(&'a self, note: impl IntoDiagnostic<'a, Noted>) -> Noted {
self.create_note(note).emit()
}
+ #[track_caller]
pub fn create_fatal<'a>(
&'a self,
fatal: impl IntoDiagnostic<'a, !>,
@@ -369,6 +360,7 @@ impl ParseSess {
fatal.into_diagnostic(&self.span_diagnostic)
}
+ #[track_caller]
pub fn emit_fatal<'a>(&'a self, fatal: impl IntoDiagnostic<'a, !>) -> ! {
self.create_fatal(fatal).emit()
}
@@ -383,16 +375,19 @@ impl ParseSess {
}
#[rustc_lint_diagnostics]
+ #[track_caller]
pub fn struct_warn(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, ()> {
self.span_diagnostic.struct_warn(msg)
}
#[rustc_lint_diagnostics]
+ #[track_caller]
pub fn struct_fatal(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, !> {
self.span_diagnostic.struct_fatal(msg)
}
#[rustc_lint_diagnostics]
+ #[track_caller]
pub fn struct_diagnostic<G: EmissionGuarantee>(
&self,
msg: impl Into<DiagnosticMessage>,
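The `#[track_caller]` annotations added throughout parse.rs make panics and `Location::caller()` report the call site of these diagnostic helpers instead of their bodies, which keeps diagnostic-origin tracking pointing at useful locations. A standalone sketch of the attribute's effect:

#[track_caller]
fn report(msg: &str) {
    // With #[track_caller], this is the caller's file/line, not this function's.
    let loc = std::panic::Location::caller();
    println!("{}:{}: {msg}", loc.file(), loc.line());
}

fn main() {
    report("something worth noting"); // printed location is this line
}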
diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs
index 5be122ffb..086ce4e69 100644
--- a/compiler/rustc_session/src/session.rs
+++ b/compiler/rustc_session/src/session.rs
@@ -17,7 +17,7 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_data_structures::jobserver::{self, Client};
use rustc_data_structures::profiling::{duration_to_secs_str, SelfProfiler, SelfProfilerRef};
use rustc_data_structures::sync::{
- self, AtomicU64, AtomicUsize, Lock, Lrc, OnceCell, OneThread, Ordering, Ordering::SeqCst,
+ self, AtomicU64, AtomicUsize, Lock, Lrc, OneThread, Ordering, Ordering::SeqCst,
};
use rustc_errors::annotate_snippet_emitter_writer::AnnotateSnippetEmitterWriter;
use rustc_errors::emitter::{Emitter, EmitterWriter, HumanReadableErrorType};
@@ -152,16 +152,6 @@ pub struct Session {
/// Input, input file path and output file path to this compilation process.
pub io: CompilerIO,
- crate_types: OnceCell<Vec<CrateType>>,
- /// The `stable_crate_id` is constructed out of the crate name and all the
- /// `-C metadata` arguments passed to the compiler. Its value forms a unique
- /// global identifier for the crate. It is used to allow multiple crates
- /// with the same name to coexist. See the
- /// `rustc_symbol_mangling` crate for more information.
- pub stable_crate_id: OnceCell<StableCrateId>,
-
- features: OnceCell<rustc_feature::Features>,
-
incr_comp_session: OneThread<RefCell<IncrCompSession>>,
/// Used for incremental compilation tests. Will only be populated if
/// `-Zquery-dep-graph` is specified.
@@ -310,55 +300,11 @@ impl Session {
self.parse_sess.span_diagnostic.emit_future_breakage_report(diags);
}
- pub fn local_stable_crate_id(&self) -> StableCrateId {
- self.stable_crate_id.get().copied().unwrap()
- }
-
- pub fn crate_types(&self) -> &[CrateType] {
- self.crate_types.get().unwrap().as_slice()
- }
-
/// Returns true if the crate is a testing one.
pub fn is_test_crate(&self) -> bool {
self.opts.test
}
- pub fn needs_crate_hash(&self) -> bool {
- // Why is the crate hash needed for these configurations?
- // - debug_assertions: for the "fingerprint the result" check in
- // `rustc_query_system::query::plumbing::execute_job`.
- // - incremental: for query lookups.
- // - needs_metadata: for putting into crate metadata.
- // - instrument_coverage: for putting into coverage data (see
- // `hash_mir_source`).
- cfg!(debug_assertions)
- || self.opts.incremental.is_some()
- || self.needs_metadata()
- || self.instrument_coverage()
- }
-
- pub fn metadata_kind(&self) -> MetadataKind {
- self.crate_types()
- .iter()
- .map(|ty| match *ty {
- CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => {
- MetadataKind::None
- }
- CrateType::Rlib => MetadataKind::Uncompressed,
- CrateType::Dylib | CrateType::ProcMacro => MetadataKind::Compressed,
- })
- .max()
- .unwrap_or(MetadataKind::None)
- }
-
- pub fn needs_metadata(&self) -> bool {
- self.metadata_kind() != MetadataKind::None
- }
-
- pub fn init_crate_types(&self, crate_types: Vec<CrateType>) {
- self.crate_types.set(crate_types).expect("`crate_types` was initialized twice")
- }
-
#[rustc_lint_diagnostics]
#[track_caller]
pub fn struct_span_warn<S: Into<MultiSpan>>(
@@ -677,7 +623,7 @@ impl Session {
pub fn delay_span_bug<S: Into<MultiSpan>>(
&self,
sp: S,
- msg: impl Into<DiagnosticMessage>,
+ msg: impl Into<String>,
) -> ErrorGuaranteed {
self.diagnostic().delay_span_bug(sp, msg)
}
@@ -757,21 +703,6 @@ impl Session {
self.opts.cg.instrument_coverage() == InstrumentCoverage::ExceptUnusedFunctions
}
- /// Gets the features enabled for the current compilation session.
- /// DO NOT USE THIS METHOD if there is a TyCtxt available, as it circumvents
- /// dependency tracking. Use tcx.features() instead.
- #[inline]
- pub fn features_untracked(&self) -> &rustc_feature::Features {
- self.features.get().unwrap()
- }
-
- pub fn init_features(&self, features: rustc_feature::Features) {
- match self.features.set(features) {
- Ok(()) => {}
- Err(_) => panic!("`features` was initialized twice"),
- }
- }
-
pub fn is_sanitizer_cfi_enabled(&self) -> bool {
self.opts.unstable_opts.sanitizer.contains(SanitizerSet::CFI)
}
@@ -995,18 +926,18 @@ impl Session {
}
/// Are we allowed to use features from the Rust 2018 edition?
- pub fn rust_2018(&self) -> bool {
- self.edition().rust_2018()
+ pub fn at_least_rust_2018(&self) -> bool {
+ self.edition().at_least_rust_2018()
}
/// Are we allowed to use features from the Rust 2021 edition?
- pub fn rust_2021(&self) -> bool {
- self.edition().rust_2021()
+ pub fn at_least_rust_2021(&self) -> bool {
+ self.edition().at_least_rust_2021()
}
/// Are we allowed to use features from the Rust 2024 edition?
- pub fn rust_2024(&self) -> bool {
- self.edition().rust_2024()
+ pub fn at_least_rust_2024(&self) -> bool {
+ self.edition().at_least_rust_2024()
}
/// Returns `true` if we should use the PLT for shared library calls.
@@ -1057,6 +988,10 @@ impl Session {
self.opts.unstable_opts.verbose
}
+ pub fn print_llvm_stats(&self) -> bool {
+ self.opts.unstable_opts.print_codegen_stats
+ }
+
pub fn verify_llvm_ir(&self) -> bool {
self.opts.unstable_opts.verify_llvm_ir || option_env!("RUSTC_VERIFY_LLVM_IR").is_some()
}
@@ -1346,18 +1281,15 @@ fn default_emitter(
);
Box::new(emitter.ui_testing(sopts.unstable_opts.ui_testing))
} else {
- let emitter = EmitterWriter::stderr(
- color_config,
- Some(source_map),
- bundle,
- fallback_bundle,
- short,
- sopts.unstable_opts.teach,
- sopts.diagnostic_width,
- macro_backtrace,
- track_diagnostics,
- terminal_url,
- );
+ let emitter = EmitterWriter::stderr(color_config, fallback_bundle)
+ .fluent_bundle(bundle)
+ .sm(Some(source_map))
+ .short_message(short)
+ .teach(sopts.unstable_opts.teach)
+ .diagnostic_width(sopts.diagnostic_width)
+ .macro_backtrace(macro_backtrace)
+ .track_diagnostics(track_diagnostics)
+ .terminal_url(terminal_url);
Box::new(emitter.ui_testing(sopts.unstable_opts.ui_testing))
}
}
@@ -1392,6 +1324,7 @@ pub fn build_session(
file_loader: Option<Box<dyn FileLoader + Send + Sync + 'static>>,
target_override: Option<Target>,
cfg_version: &'static str,
+ ice_file: Option<PathBuf>,
) -> Session {
// FIXME: This is not general enough to make the warning lint completely override
// normal diagnostic warnings, since the warning lint can also be denied and changed
@@ -1420,7 +1353,7 @@ pub fn build_session(
let loader = file_loader.unwrap_or_else(|| Box::new(RealFileLoader));
let hash_kind = sopts.unstable_opts.src_hash_algorithm.unwrap_or_else(|| {
if target_cfg.is_like_msvc {
- SourceFileHashAlgorithm::Sha1
+ SourceFileHashAlgorithm::Sha256
} else {
SourceFileHashAlgorithm::Md5
}
@@ -1437,10 +1370,11 @@ pub fn build_session(
);
let emitter = default_emitter(&sopts, registry, source_map.clone(), bundle, fallback_bundle);
- let span_diagnostic = rustc_errors::Handler::with_emitter_and_flags(
- emitter,
- sopts.unstable_opts.diagnostic_handler_flags(can_emit_warnings),
- );
+ let mut span_diagnostic = rustc_errors::Handler::with_emitter(emitter)
+ .with_flags(sopts.unstable_opts.diagnostic_handler_flags(can_emit_warnings));
+ if let Some(ice_file) = ice_file {
+ span_diagnostic = span_diagnostic.with_ice_file(ice_file);
+ }
let self_profiler = if let SwitchWithOptPath::Enabled(ref d) = sopts.unstable_opts.self_profile
{
@@ -1513,9 +1447,6 @@ pub fn build_session(
parse_sess,
sysroot,
io,
- crate_types: OnceCell::new(),
- stable_crate_id: OnceCell::new(),
- features: OnceCell::new(),
incr_comp_session: OneThread::new(RefCell::new(IncrCompSession::NotInitialized)),
cgu_reuse_tracker,
prof,
@@ -1616,13 +1547,19 @@ fn validate_commandline_args_with_session_available(sess: &Session) {
// LLVM CFI requires LTO.
if sess.is_sanitizer_cfi_enabled()
- && !(sess.lto() == config::Lto::Fat
- || sess.lto() == config::Lto::Thin
- || sess.opts.cg.linker_plugin_lto.enabled())
+ && !(sess.lto() == config::Lto::Fat || sess.opts.cg.linker_plugin_lto.enabled())
{
sess.emit_err(errors::SanitizerCfiRequiresLto);
}
+ // LLVM CFI using rustc LTO requires a single codegen unit.
+ if sess.is_sanitizer_cfi_enabled()
+ && sess.lto() == config::Lto::Fat
+ && !(sess.codegen_units().as_usize() == 1)
+ {
+ sess.emit_err(errors::SanitizerCfiRequiresSingleCodegenUnit);
+ }
+
// LLVM CFI is incompatible with LLVM KCFI.
if sess.is_sanitizer_cfi_enabled() && sess.is_sanitizer_kcfi_enabled() {
sess.emit_err(errors::CannotMixAndMatchSanitizers {
@@ -1731,7 +1668,7 @@ pub struct EarlyErrorHandler {
impl EarlyErrorHandler {
pub fn new(output: ErrorOutputType) -> Self {
let emitter = mk_emitter(output);
- Self { handler: rustc_errors::Handler::with_emitter(true, None, emitter) }
+ Self { handler: rustc_errors::Handler::with_emitter(emitter) }
}
pub fn abort_if_errors(&self) {
@@ -1745,7 +1682,7 @@ impl EarlyErrorHandler {
self.handler.abort_if_errors();
let emitter = mk_emitter(output);
- self.handler = Handler::with_emitter(true, None, emitter);
+ self.handler = Handler::with_emitter(emitter);
}
#[allow(rustc::untranslatable_diagnostic)]
@@ -1788,18 +1725,7 @@ fn mk_emitter(output: ErrorOutputType) -> Box<dyn Emitter + sync::Send + 'static
let emitter: Box<dyn Emitter + sync::Send> = match output {
config::ErrorOutputType::HumanReadable(kind) => {
let (short, color_config) = kind.unzip();
- Box::new(EmitterWriter::stderr(
- color_config,
- None,
- None,
- fallback_bundle,
- short,
- false,
- None,
- false,
- false,
- TerminalUrl::No,
- ))
+ Box::new(EmitterWriter::stderr(color_config, fallback_bundle).short_message(short))
}
config::ErrorOutputType::Json { pretty, json_rendered } => Box::new(JsonEmitter::basic(
pretty,
diff --git a/compiler/rustc_session/src/utils.rs b/compiler/rustc_session/src/utils.rs
index 1d15e2c28..71f2591fe 100644
--- a/compiler/rustc_session/src/utils.rs
+++ b/compiler/rustc_session/src/utils.rs
@@ -7,6 +7,7 @@ impl Session {
pub fn timer(&self, what: &'static str) -> VerboseTimingGuard<'_> {
self.prof.verbose_generic_activity(what)
}
+ /// Used by `-Z self-profile`.
pub fn time<R>(&self, what: &'static str, f: impl FnOnce() -> R) -> R {
self.prof.verbose_generic_activity(what).run(f)
}
diff --git a/compiler/rustc_smir/Cargo.toml b/compiler/rustc_smir/Cargo.toml
index a6e6de5f7..80d4e7ed0 100644
--- a/compiler/rustc_smir/Cargo.toml
+++ b/compiler/rustc_smir/Cargo.toml
@@ -4,14 +4,18 @@ version = "0.0.0"
edition = "2021"
[dependencies]
-rustc_hir = { path = "../rustc_hir" }
+# Use optional dependencies for rustc_* in order to support building this crate separately.
+rustc_hir = { path = "../rustc_hir", optional = true }
rustc_middle = { path = "../rustc_middle", optional = true }
rustc_span = { path = "../rustc_span", optional = true }
+rustc_target = { path = "../rustc_target", optional = true }
tracing = "0.1"
scoped-tls = "1.0"
[features]
default = [
+ "rustc_hir",
"rustc_middle",
"rustc_span",
+ "rustc_target",
]
diff --git a/compiler/rustc_smir/rust-toolchain.toml b/compiler/rustc_smir/rust-toolchain.toml
index 157dfd620..d75e8e33b 100644
--- a/compiler/rustc_smir/rust-toolchain.toml
+++ b/compiler/rustc_smir/rust-toolchain.toml
@@ -1,3 +1,3 @@
[toolchain]
-channel = "nightly-2023-02-28"
+channel = "nightly-2023-06-14"
components = [ "rustfmt", "rustc-dev" ]
diff --git a/compiler/rustc_smir/src/lib.rs b/compiler/rustc_smir/src/lib.rs
index fb03633b9..8cb533c8d 100644
--- a/compiler/rustc_smir/src/lib.rs
+++ b/compiler/rustc_smir/src/lib.rs
@@ -1,6 +1,6 @@
//! The WIP stable interface to rustc internals.
//!
-//! For more information see https://github.com/rust-lang/project-stable-mir
+//! For more information see <https://github.com/rust-lang/project-stable-mir>
//!
//! # Note
//!
@@ -11,8 +11,19 @@
test(attr(allow(unused_variables), deny(warnings)))
)]
#![cfg_attr(not(feature = "default"), feature(rustc_private))]
-#![feature(local_key_cell_methods)]
#![feature(ptr_metadata)]
+#![feature(type_alias_impl_trait)] // Used to define opaque types.
+#![feature(intra_doc_pointers)]
+
+// Declare extern rustc_* crates to enable building this crate separately from the compiler.
+#[cfg(not(feature = "default"))]
+extern crate rustc_hir;
+#[cfg(not(feature = "default"))]
+extern crate rustc_middle;
+#[cfg(not(feature = "default"))]
+extern crate rustc_span;
+#[cfg(not(feature = "default"))]
+extern crate rustc_target;
pub mod rustc_internal;
pub mod stable_mir;
diff --git a/compiler/rustc_smir/src/rustc_internal/mod.rs b/compiler/rustc_smir/src/rustc_internal/mod.rs
index 609a04d26..078ff6744 100644
--- a/compiler/rustc_smir/src/rustc_internal/mod.rs
+++ b/compiler/rustc_smir/src/rustc_internal/mod.rs
@@ -3,6 +3,9 @@
//! For that, we define APIs that will temporarily be public to 3P that exposes rustc internal APIs
//! until stable MIR is complete.
+use std::fmt::Debug;
+use std::string::ToString;
+
use crate::{
rustc_smir::Tables,
stable_mir::{self, with},
@@ -24,21 +27,117 @@ pub fn crate_item(did: DefId) -> stable_mir::CrateItem {
with_tables(|t| t.crate_item(did))
}
+pub fn adt_def(did: DefId) -> stable_mir::ty::AdtDef {
+ with_tables(|t| t.adt_def(did))
+}
+
+pub fn foreign_def(did: DefId) -> stable_mir::ty::ForeignDef {
+ with_tables(|t| t.foreign_def(did))
+}
+
+pub fn fn_def(did: DefId) -> stable_mir::ty::FnDef {
+ with_tables(|t| t.fn_def(did))
+}
+
+pub fn closure_def(did: DefId) -> stable_mir::ty::ClosureDef {
+ with_tables(|t| t.closure_def(did))
+}
+
+pub fn generator_def(did: DefId) -> stable_mir::ty::GeneratorDef {
+ with_tables(|t| t.generator_def(did))
+}
+
+pub fn alias_def(did: DefId) -> stable_mir::ty::AliasDef {
+ with_tables(|t| t.alias_def(did))
+}
+
+pub fn param_def(did: DefId) -> stable_mir::ty::ParamDef {
+ with_tables(|t| t.param_def(did))
+}
+
+pub fn br_named_def(did: DefId) -> stable_mir::ty::BrNamedDef {
+ with_tables(|t| t.br_named_def(did))
+}
+
+pub fn trait_def(did: DefId) -> stable_mir::ty::TraitDef {
+ with_tables(|t| t.trait_def(did))
+}
+
+pub fn impl_def(did: DefId) -> stable_mir::ty::ImplDef {
+ with_tables(|t| t.impl_def(did))
+}
+
impl<'tcx> Tables<'tcx> {
pub fn item_def_id(&self, item: &stable_mir::CrateItem) -> DefId {
self.def_ids[item.0]
}
+ pub fn trait_def_id(&self, trait_def: &stable_mir::ty::TraitDef) -> DefId {
+ self.def_ids[trait_def.0]
+ }
+
+ pub fn impl_trait_def_id(&self, impl_def: &stable_mir::ty::ImplDef) -> DefId {
+ self.def_ids[impl_def.0]
+ }
+
pub fn crate_item(&mut self, did: DefId) -> stable_mir::CrateItem {
+ stable_mir::CrateItem(self.create_def_id(did))
+ }
+
+ pub fn adt_def(&mut self, did: DefId) -> stable_mir::ty::AdtDef {
+ stable_mir::ty::AdtDef(self.create_def_id(did))
+ }
+
+ pub fn foreign_def(&mut self, did: DefId) -> stable_mir::ty::ForeignDef {
+ stable_mir::ty::ForeignDef(self.create_def_id(did))
+ }
+
+ pub fn fn_def(&mut self, did: DefId) -> stable_mir::ty::FnDef {
+ stable_mir::ty::FnDef(self.create_def_id(did))
+ }
+
+ pub fn closure_def(&mut self, did: DefId) -> stable_mir::ty::ClosureDef {
+ stable_mir::ty::ClosureDef(self.create_def_id(did))
+ }
+
+ pub fn generator_def(&mut self, did: DefId) -> stable_mir::ty::GeneratorDef {
+ stable_mir::ty::GeneratorDef(self.create_def_id(did))
+ }
+
+ pub fn alias_def(&mut self, did: DefId) -> stable_mir::ty::AliasDef {
+ stable_mir::ty::AliasDef(self.create_def_id(did))
+ }
+
+ pub fn param_def(&mut self, did: DefId) -> stable_mir::ty::ParamDef {
+ stable_mir::ty::ParamDef(self.create_def_id(did))
+ }
+
+ pub fn br_named_def(&mut self, did: DefId) -> stable_mir::ty::BrNamedDef {
+ stable_mir::ty::BrNamedDef(self.create_def_id(did))
+ }
+
+ pub fn trait_def(&mut self, did: DefId) -> stable_mir::ty::TraitDef {
+ stable_mir::ty::TraitDef(self.create_def_id(did))
+ }
+
+ pub fn const_def(&mut self, did: DefId) -> stable_mir::ty::ConstDef {
+ stable_mir::ty::ConstDef(self.create_def_id(did))
+ }
+
+ pub fn impl_def(&mut self, did: DefId) -> stable_mir::ty::ImplDef {
+ stable_mir::ty::ImplDef(self.create_def_id(did))
+ }
+
+ fn create_def_id(&mut self, did: DefId) -> stable_mir::DefId {
// FIXME: this becomes inefficient when we have too many ids
for (i, &d) in self.def_ids.iter().enumerate() {
if d == did {
- return stable_mir::CrateItem(i);
+ return i;
}
}
let id = self.def_ids.len();
self.def_ids.push(did);
- stable_mir::CrateItem(id)
+ id
}
}
@@ -49,3 +148,10 @@ pub fn crate_num(item: &stable_mir::Crate) -> CrateNum {
pub fn run(tcx: TyCtxt<'_>, f: impl FnOnce()) {
crate::stable_mir::run(Tables { tcx, def_ids: vec![], types: vec![] }, f);
}
+
+/// A type that provides internal information but that can still be used for debugging purposes.
+pub type Opaque = impl Debug + ToString + Clone;
+
+pub(crate) fn opaque<T: Debug>(value: &T) -> Opaque {
+ format!("{value:?}")
+}
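The new `Opaque` alias relies on the unstable `type_alias_impl_trait` feature: the alias names whatever concrete type its defining use produces (here a `String`, via `format!`). A hedged nightly-only sketch of the same pattern in isolation; the exact rules for defining uses have shifted across nightlies:

#![feature(type_alias_impl_trait)]
#![allow(incomplete_features)] // silence the warning if the toolchain flags the feature as incomplete

use std::fmt::Debug;

// The concrete type behind `Opaque` is inferred from the defining use below.
pub type Opaque = impl Debug + ToString + Clone;

fn opaque<T: Debug>(value: &T) -> Opaque {
    format!("{value:?}")
}

fn main() {
    let o = opaque(&vec![1, 2, 3]);
    println!("{}", o.to_string());
}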
diff --git a/compiler/rustc_smir/src/rustc_smir/mod.rs b/compiler/rustc_smir/src/rustc_smir/mod.rs
index 85d5bb00c..06b37008e 100644
--- a/compiler/rustc_smir/src/rustc_smir/mod.rs
+++ b/compiler/rustc_smir/src/rustc_smir/mod.rs
@@ -7,11 +7,19 @@
//!
//! For now, we are developing everything inside `rustc`, thus, we keep this module private.
-use crate::stable_mir::ty::{FloatTy, IntTy, RigidTy, TyKind, UintTy};
+use crate::rustc_internal::{self, opaque};
+use crate::stable_mir::mir::{CopyNonOverlapping, UserTypeProjection, VariantIdx};
+use crate::stable_mir::ty::{
+ allocation_filter, new_allocation, Const, FloatTy, IntTy, Movability, RigidTy, TyKind, UintTy,
+};
use crate::stable_mir::{self, Context};
-use rustc_middle::mir;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_middle::mir::coverage::CodeRegion;
+use rustc_middle::mir::interpret::alloc_range;
+use rustc_middle::mir::{self, ConstantKind};
+use rustc_middle::ty::{self, Ty, TyCtxt, Variance};
use rustc_span::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc_target::abi::FieldIdx;
use tracing::debug;
impl<'tcx> Context for Tables<'tcx> {
@@ -36,6 +44,35 @@ impl<'tcx> Context for Tables<'tcx> {
fn entry_fn(&mut self) -> Option<stable_mir::CrateItem> {
Some(self.crate_item(self.tcx.entry_fn(())?.0))
}
+
+ fn all_trait_decls(&mut self) -> stable_mir::TraitDecls {
+ self.tcx
+ .traits(LOCAL_CRATE)
+ .iter()
+ .map(|trait_def_id| self.trait_def(*trait_def_id))
+ .collect()
+ }
+
+ fn trait_decl(&mut self, trait_def: &stable_mir::ty::TraitDef) -> stable_mir::ty::TraitDecl {
+ let def_id = self.trait_def_id(trait_def);
+ let trait_def = self.tcx.trait_def(def_id);
+ trait_def.stable(self)
+ }
+
+ fn all_trait_impls(&mut self) -> stable_mir::ImplTraitDecls {
+ self.tcx
+ .trait_impls_in_crate(LOCAL_CRATE)
+ .iter()
+ .map(|impl_def_id| self.impl_def(*impl_def_id))
+ .collect()
+ }
+
+ fn trait_impl(&mut self, impl_def: &stable_mir::ty::ImplDef) -> stable_mir::ty::ImplTrait {
+ let def_id = self.impl_trait_def_id(impl_def);
+ let impl_trait = self.tcx.impl_trait_ref(def_id).unwrap();
+ impl_trait.stable(self)
+ }
+
fn mir_body(&mut self, item: &stable_mir::CrateItem) -> stable_mir::mir::Body {
let def_id = self.item_def_id(item);
let mir = self.tcx.optimized_mir(def_id);
@@ -44,8 +81,12 @@ impl<'tcx> Context for Tables<'tcx> {
.basic_blocks
.iter()
.map(|block| stable_mir::mir::BasicBlock {
- terminator: block.terminator().stable(),
- statements: block.statements.iter().map(mir::Statement::stable).collect(),
+ terminator: block.terminator().stable(self),
+ statements: block
+ .statements
+ .iter()
+ .map(|statement| statement.stable(self))
+ .collect(),
})
.collect(),
locals: mir.local_decls.iter().map(|decl| self.intern_ty(decl.ty)).collect(),
@@ -57,7 +98,8 @@ impl<'tcx> Context for Tables<'tcx> {
}
fn ty_kind(&mut self, ty: crate::stable_mir::ty::Ty) -> TyKind {
- self.rustc_ty_to_ty(self.types[ty.0])
+ let ty = self.types[ty.0];
+ ty.stable(self)
}
}
@@ -68,57 +110,6 @@ pub struct Tables<'tcx> {
}
impl<'tcx> Tables<'tcx> {
- fn rustc_ty_to_ty(&mut self, ty: Ty<'tcx>) -> TyKind {
- match ty.kind() {
- ty::Bool => TyKind::RigidTy(RigidTy::Bool),
- ty::Char => TyKind::RigidTy(RigidTy::Char),
- ty::Int(int_ty) => match int_ty {
- ty::IntTy::Isize => TyKind::RigidTy(RigidTy::Int(IntTy::Isize)),
- ty::IntTy::I8 => TyKind::RigidTy(RigidTy::Int(IntTy::I8)),
- ty::IntTy::I16 => TyKind::RigidTy(RigidTy::Int(IntTy::I16)),
- ty::IntTy::I32 => TyKind::RigidTy(RigidTy::Int(IntTy::I32)),
- ty::IntTy::I64 => TyKind::RigidTy(RigidTy::Int(IntTy::I64)),
- ty::IntTy::I128 => TyKind::RigidTy(RigidTy::Int(IntTy::I128)),
- },
- ty::Uint(uint_ty) => match uint_ty {
- ty::UintTy::Usize => TyKind::RigidTy(RigidTy::Uint(UintTy::Usize)),
- ty::UintTy::U8 => TyKind::RigidTy(RigidTy::Uint(UintTy::U8)),
- ty::UintTy::U16 => TyKind::RigidTy(RigidTy::Uint(UintTy::U16)),
- ty::UintTy::U32 => TyKind::RigidTy(RigidTy::Uint(UintTy::U32)),
- ty::UintTy::U64 => TyKind::RigidTy(RigidTy::Uint(UintTy::U64)),
- ty::UintTy::U128 => TyKind::RigidTy(RigidTy::Uint(UintTy::U128)),
- },
- ty::Float(float_ty) => match float_ty {
- ty::FloatTy::F32 => TyKind::RigidTy(RigidTy::Float(FloatTy::F32)),
- ty::FloatTy::F64 => TyKind::RigidTy(RigidTy::Float(FloatTy::F64)),
- },
- ty::Adt(_, _) => todo!(),
- ty::Foreign(_) => todo!(),
- ty::Str => todo!(),
- ty::Array(_, _) => todo!(),
- ty::Slice(_) => todo!(),
- ty::RawPtr(_) => todo!(),
- ty::Ref(_, _, _) => todo!(),
- ty::FnDef(_, _) => todo!(),
- ty::FnPtr(_) => todo!(),
- ty::Placeholder(..) => todo!(),
- ty::Dynamic(_, _, _) => todo!(),
- ty::Closure(_, _) => todo!(),
- ty::Generator(_, _, _) => todo!(),
- ty::GeneratorWitness(_) => todo!(),
- ty::GeneratorWitnessMIR(_, _) => todo!(),
- ty::Never => todo!(),
- ty::Tuple(fields) => TyKind::RigidTy(RigidTy::Tuple(
- fields.iter().map(|ty| self.intern_ty(ty)).collect(),
- )),
- ty::Alias(_, _) => todo!(),
- ty::Param(_) => todo!(),
- ty::Bound(_, _) => todo!(),
- ty::Infer(_) => todo!(),
- ty::Error(_) => todo!(),
- }
- }
-
fn intern_ty(&mut self, ty: Ty<'tcx>) -> stable_mir::ty::Ty {
if let Some(id) = self.types.iter().position(|&t| t == ty) {
return stable_mir::ty::Ty(id);
@@ -137,78 +128,343 @@ fn smir_crate(tcx: TyCtxt<'_>, crate_num: CrateNum) -> stable_mir::Crate {
stable_mir::Crate { id: crate_num.into(), name: crate_name, is_local }
}
-pub trait Stable {
+/// Trait used to convert an internal MIR type to a Stable MIR type.
+pub(crate) trait Stable<'tcx> {
+ /// The stable representation of the type implementing Stable.
type T;
- fn stable(&self) -> Self::T;
+ /// Converts an object to the equivalent Stable MIR representation.
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T;
}
-impl<'tcx> Stable for mir::Statement<'tcx> {
+impl<'tcx> Stable<'tcx> for mir::Statement<'tcx> {
type T = stable_mir::mir::Statement;
- fn stable(&self) -> Self::T {
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
use rustc_middle::mir::StatementKind::*;
match &self.kind {
Assign(assign) => {
- stable_mir::mir::Statement::Assign(assign.0.stable(), assign.1.stable())
- }
- FakeRead(_) => todo!(),
- SetDiscriminant { .. } => todo!(),
- Deinit(_) => todo!(),
- StorageLive(_) => todo!(),
- StorageDead(_) => todo!(),
- Retag(_, _) => todo!(),
- PlaceMention(_) => todo!(),
- AscribeUserType(_, _) => todo!(),
- Coverage(_) => todo!(),
- Intrinsic(_) => todo!(),
- ConstEvalCounter => todo!(),
+ stable_mir::mir::Statement::Assign(assign.0.stable(tables), assign.1.stable(tables))
+ }
+ FakeRead(fake_read_place) => stable_mir::mir::Statement::FakeRead(
+ fake_read_place.0.stable(tables),
+ fake_read_place.1.stable(tables),
+ ),
+ SetDiscriminant { place: plc, variant_index: idx } => {
+ stable_mir::mir::Statement::SetDiscriminant {
+ place: plc.as_ref().stable(tables),
+ variant_index: idx.stable(tables),
+ }
+ }
+ Deinit(place) => stable_mir::mir::Statement::Deinit(place.stable(tables)),
+ StorageLive(place) => stable_mir::mir::Statement::StorageLive(place.stable(tables)),
+ StorageDead(place) => stable_mir::mir::Statement::StorageDead(place.stable(tables)),
+ Retag(retag, place) => {
+ stable_mir::mir::Statement::Retag(retag.stable(tables), place.stable(tables))
+ }
+ PlaceMention(place) => stable_mir::mir::Statement::PlaceMention(place.stable(tables)),
+ AscribeUserType(place_projection, variance) => {
+ stable_mir::mir::Statement::AscribeUserType {
+ place: place_projection.as_ref().0.stable(tables),
+ projections: place_projection.as_ref().1.stable(tables),
+ variance: variance.stable(tables),
+ }
+ }
+ Coverage(coverage) => stable_mir::mir::Statement::Coverage(stable_mir::mir::Coverage {
+ kind: coverage.kind.stable(tables),
+ code_region: coverage.code_region.as_ref().map(|reg| reg.stable(tables)),
+ }),
+            Intrinsic(intrinsic) => {
+                stable_mir::mir::Statement::Intrinsic(intrinsic.stable(tables))
+ }
+ ConstEvalCounter => stable_mir::mir::Statement::ConstEvalCounter,
Nop => stable_mir::mir::Statement::Nop,
}
}
}
-impl<'tcx> Stable for mir::Rvalue<'tcx> {
+impl<'tcx> Stable<'tcx> for mir::Rvalue<'tcx> {
type T = stable_mir::mir::Rvalue;
- fn stable(&self) -> Self::T {
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
use mir::Rvalue::*;
match self {
- Use(op) => stable_mir::mir::Rvalue::Use(op.stable()),
- Repeat(_, _) => todo!(),
- Ref(_, _, _) => todo!(),
- ThreadLocalRef(_) => todo!(),
- AddressOf(_, _) => todo!(),
- Len(_) => todo!(),
- Cast(_, _, _) => todo!(),
- BinaryOp(_, _) => todo!(),
+ Use(op) => stable_mir::mir::Rvalue::Use(op.stable(tables)),
+ Repeat(op, len) => {
+ let cnst = ConstantKind::from_const(*len, tables.tcx);
+ let len = Const { literal: cnst.stable(tables) };
+ stable_mir::mir::Rvalue::Repeat(op.stable(tables), len)
+ }
+ Ref(region, kind, place) => stable_mir::mir::Rvalue::Ref(
+ opaque(region),
+ kind.stable(tables),
+ place.stable(tables),
+ ),
+ ThreadLocalRef(def_id) => {
+ stable_mir::mir::Rvalue::ThreadLocalRef(rustc_internal::crate_item(*def_id))
+ }
+ AddressOf(mutability, place) => {
+ stable_mir::mir::Rvalue::AddressOf(mutability.stable(tables), place.stable(tables))
+ }
+ Len(place) => stable_mir::mir::Rvalue::Len(place.stable(tables)),
+ Cast(cast_kind, op, ty) => stable_mir::mir::Rvalue::Cast(
+ cast_kind.stable(tables),
+ op.stable(tables),
+ tables.intern_ty(*ty),
+ ),
+ BinaryOp(bin_op, ops) => stable_mir::mir::Rvalue::BinaryOp(
+ bin_op.stable(tables),
+ ops.0.stable(tables),
+ ops.1.stable(tables),
+ ),
CheckedBinaryOp(bin_op, ops) => stable_mir::mir::Rvalue::CheckedBinaryOp(
- bin_op.stable(),
- ops.0.stable(),
- ops.1.stable(),
+ bin_op.stable(tables),
+ ops.0.stable(tables),
+ ops.1.stable(tables),
+ ),
+ NullaryOp(null_op, ty) => {
+ stable_mir::mir::Rvalue::NullaryOp(null_op.stable(tables), tables.intern_ty(*ty))
+ }
+ UnaryOp(un_op, op) => {
+ stable_mir::mir::Rvalue::UnaryOp(un_op.stable(tables), op.stable(tables))
+ }
+ Discriminant(place) => stable_mir::mir::Rvalue::Discriminant(place.stable(tables)),
+ Aggregate(agg_kind, operands) => {
+ let operands = operands.iter().map(|op| op.stable(tables)).collect();
+ stable_mir::mir::Rvalue::Aggregate(agg_kind.stable(tables), operands)
+ }
+ ShallowInitBox(op, ty) => {
+ stable_mir::mir::Rvalue::ShallowInitBox(op.stable(tables), tables.intern_ty(*ty))
+ }
+ CopyForDeref(place) => stable_mir::mir::Rvalue::CopyForDeref(place.stable(tables)),
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::Mutability {
+ type T = stable_mir::mir::Mutability;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use mir::Mutability::*;
+ match *self {
+ Not => stable_mir::mir::Mutability::Not,
+ Mut => stable_mir::mir::Mutability::Mut,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::BorrowKind {
+ type T = stable_mir::mir::BorrowKind;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use mir::BorrowKind::*;
+ match *self {
+ Shared => stable_mir::mir::BorrowKind::Shared,
+ Shallow => stable_mir::mir::BorrowKind::Shallow,
+ Mut { kind } => stable_mir::mir::BorrowKind::Mut { kind: kind.stable(tables) },
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::MutBorrowKind {
+ type T = stable_mir::mir::MutBorrowKind;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use mir::MutBorrowKind::*;
+ match *self {
+ Default => stable_mir::mir::MutBorrowKind::Default,
+ TwoPhaseBorrow => stable_mir::mir::MutBorrowKind::TwoPhaseBorrow,
+ ClosureCapture => stable_mir::mir::MutBorrowKind::ClosureCapture,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::NullOp<'tcx> {
+ type T = stable_mir::mir::NullOp;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use mir::NullOp::*;
+ match self {
+ SizeOf => stable_mir::mir::NullOp::SizeOf,
+ AlignOf => stable_mir::mir::NullOp::AlignOf,
+ OffsetOf(indices) => stable_mir::mir::NullOp::OffsetOf(
+ indices.iter().map(|idx| idx.stable(tables)).collect(),
),
- NullaryOp(_, _) => todo!(),
- UnaryOp(un_op, op) => stable_mir::mir::Rvalue::UnaryOp(un_op.stable(), op.stable()),
- Discriminant(_) => todo!(),
- Aggregate(_, _) => todo!(),
- ShallowInitBox(_, _) => todo!(),
- CopyForDeref(_) => todo!(),
}
}
}
-impl<'tcx> Stable for mir::Operand<'tcx> {
+impl<'tcx> Stable<'tcx> for mir::CastKind {
+ type T = stable_mir::mir::CastKind;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use mir::CastKind::*;
+ match self {
+ PointerExposeAddress => stable_mir::mir::CastKind::PointerExposeAddress,
+ PointerFromExposedAddress => stable_mir::mir::CastKind::PointerFromExposedAddress,
+ PointerCoercion(c) => stable_mir::mir::CastKind::PointerCoercion(c.stable(tables)),
+ DynStar => stable_mir::mir::CastKind::DynStar,
+ IntToInt => stable_mir::mir::CastKind::IntToInt,
+ FloatToInt => stable_mir::mir::CastKind::FloatToInt,
+ FloatToFloat => stable_mir::mir::CastKind::FloatToFloat,
+ IntToFloat => stable_mir::mir::CastKind::IntToFloat,
+ PtrToPtr => stable_mir::mir::CastKind::PtrToPtr,
+ FnPtrToPtr => stable_mir::mir::CastKind::FnPtrToPtr,
+ Transmute => stable_mir::mir::CastKind::Transmute,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::AliasKind {
+ type T = stable_mir::ty::AliasKind;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use ty::AliasKind::*;
+ match self {
+ Projection => stable_mir::ty::AliasKind::Projection,
+ Inherent => stable_mir::ty::AliasKind::Inherent,
+ Opaque => stable_mir::ty::AliasKind::Opaque,
+ Weak => stable_mir::ty::AliasKind::Weak,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::AliasTy<'tcx> {
+ type T = stable_mir::ty::AliasTy;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ let ty::AliasTy { args, def_id, .. } = self;
+ stable_mir::ty::AliasTy { def_id: tables.alias_def(*def_id), args: args.stable(tables) }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::DynKind {
+ type T = stable_mir::ty::DynKind;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use ty::DynKind;
+ match self {
+ DynKind::Dyn => stable_mir::ty::DynKind::Dyn,
+ DynKind::DynStar => stable_mir::ty::DynKind::DynStar,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::ExistentialPredicate<'tcx> {
+ type T = stable_mir::ty::ExistentialPredicate;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::ExistentialPredicate::*;
+ match self {
+ ty::ExistentialPredicate::Trait(existential_trait_ref) => {
+ Trait(existential_trait_ref.stable(tables))
+ }
+ ty::ExistentialPredicate::Projection(existential_projection) => {
+ Projection(existential_projection.stable(tables))
+ }
+ ty::ExistentialPredicate::AutoTrait(def_id) => AutoTrait(tables.trait_def(*def_id)),
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::ExistentialTraitRef<'tcx> {
+ type T = stable_mir::ty::ExistentialTraitRef;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ let ty::ExistentialTraitRef { def_id, args } = self;
+ stable_mir::ty::ExistentialTraitRef {
+ def_id: tables.trait_def(*def_id),
+ generic_args: args.stable(tables),
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::TermKind<'tcx> {
+ type T = stable_mir::ty::TermKind;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::TermKind;
+ match self {
+ ty::TermKind::Ty(ty) => TermKind::Type(tables.intern_ty(*ty)),
+ ty::TermKind::Const(cnst) => {
+ let cnst = ConstantKind::from_const(*cnst, tables.tcx);
+ let cnst = Const { literal: cnst.stable(tables) };
+ TermKind::Const(cnst)
+ }
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::ExistentialProjection<'tcx> {
+ type T = stable_mir::ty::ExistentialProjection;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ let ty::ExistentialProjection { def_id, args, term } = self;
+ stable_mir::ty::ExistentialProjection {
+ def_id: tables.trait_def(*def_id),
+ generic_args: args.stable(tables),
+ term: term.unpack().stable(tables),
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::adjustment::PointerCoercion {
+ type T = stable_mir::mir::PointerCoercion;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use ty::adjustment::PointerCoercion;
+ match self {
+ PointerCoercion::ReifyFnPointer => stable_mir::mir::PointerCoercion::ReifyFnPointer,
+ PointerCoercion::UnsafeFnPointer => stable_mir::mir::PointerCoercion::UnsafeFnPointer,
+ PointerCoercion::ClosureFnPointer(unsafety) => {
+ stable_mir::mir::PointerCoercion::ClosureFnPointer(unsafety.stable(tables))
+ }
+ PointerCoercion::MutToConstPointer => {
+ stable_mir::mir::PointerCoercion::MutToConstPointer
+ }
+ PointerCoercion::ArrayToPointer => stable_mir::mir::PointerCoercion::ArrayToPointer,
+ PointerCoercion::Unsize => stable_mir::mir::PointerCoercion::Unsize,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for rustc_hir::Unsafety {
+ type T = stable_mir::mir::Safety;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ match self {
+ rustc_hir::Unsafety::Unsafe => stable_mir::mir::Safety::Unsafe,
+ rustc_hir::Unsafety::Normal => stable_mir::mir::Safety::Normal,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::FakeReadCause {
+ type T = stable_mir::mir::FakeReadCause;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use mir::FakeReadCause::*;
+ match self {
+ ForMatchGuard => stable_mir::mir::FakeReadCause::ForMatchGuard,
+ ForMatchedPlace(local_def_id) => {
+ stable_mir::mir::FakeReadCause::ForMatchedPlace(opaque(local_def_id))
+ }
+ ForGuardBinding => stable_mir::mir::FakeReadCause::ForGuardBinding,
+ ForLet(local_def_id) => stable_mir::mir::FakeReadCause::ForLet(opaque(local_def_id)),
+ ForIndex => stable_mir::mir::FakeReadCause::ForIndex,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for FieldIdx {
+ type T = usize;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ self.as_usize()
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::Operand<'tcx> {
type T = stable_mir::mir::Operand;
- fn stable(&self) -> Self::T {
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
use mir::Operand::*;
match self {
- Copy(place) => stable_mir::mir::Operand::Copy(place.stable()),
- Move(place) => stable_mir::mir::Operand::Move(place.stable()),
+ Copy(place) => stable_mir::mir::Operand::Copy(place.stable(tables)),
+ Move(place) => stable_mir::mir::Operand::Move(place.stable(tables)),
Constant(c) => stable_mir::mir::Operand::Constant(c.to_string()),
}
}
}
-impl<'tcx> Stable for mir::Place<'tcx> {
+impl<'tcx> Stable<'tcx> for mir::Place<'tcx> {
type T = stable_mir::mir::Place;
- fn stable(&self) -> Self::T {
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
stable_mir::mir::Place {
local: self.local.as_usize(),
projection: format!("{:?}", self.projection),
@@ -216,9 +472,113 @@ impl<'tcx> Stable for mir::Place<'tcx> {
}
}
-impl Stable for mir::UnwindAction {
+impl<'tcx> Stable<'tcx> for mir::coverage::CoverageKind {
+ type T = stable_mir::mir::CoverageKind;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use rustc_middle::mir::coverage::CoverageKind;
+ match self {
+ CoverageKind::Counter { function_source_hash, id } => {
+ stable_mir::mir::CoverageKind::Counter {
+ function_source_hash: *function_source_hash as usize,
+ id: opaque(id),
+ }
+ }
+ CoverageKind::Expression { id, lhs, op, rhs } => {
+ stable_mir::mir::CoverageKind::Expression {
+ id: opaque(id),
+ lhs: opaque(lhs),
+ op: op.stable(tables),
+ rhs: opaque(rhs),
+ }
+ }
+ CoverageKind::Unreachable => stable_mir::mir::CoverageKind::Unreachable,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::UserTypeProjection {
+ type T = stable_mir::mir::UserTypeProjection;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ UserTypeProjection { base: self.base.as_usize(), projection: format!("{:?}", self.projs) }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::coverage::Op {
+ type T = stable_mir::mir::Op;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use rustc_middle::mir::coverage::Op::*;
+ match self {
+ Subtract => stable_mir::mir::Op::Subtract,
+ Add => stable_mir::mir::Op::Add,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::Local {
+ type T = stable_mir::mir::Local;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ self.as_usize()
+ }
+}
+
+impl<'tcx> Stable<'tcx> for rustc_target::abi::VariantIdx {
+ type T = VariantIdx;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ self.as_usize()
+ }
+}
+
+impl<'tcx> Stable<'tcx> for Variance {
+ type T = stable_mir::mir::Variance;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ match self {
+ Variance::Bivariant => stable_mir::mir::Variance::Bivariant,
+ Variance::Contravariant => stable_mir::mir::Variance::Contravariant,
+ Variance::Covariant => stable_mir::mir::Variance::Covariant,
+ Variance::Invariant => stable_mir::mir::Variance::Invariant,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::RetagKind {
+ type T = stable_mir::mir::RetagKind;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use rustc_middle::mir::RetagKind;
+ match self {
+ RetagKind::FnEntry => stable_mir::mir::RetagKind::FnEntry,
+ RetagKind::TwoPhase => stable_mir::mir::RetagKind::TwoPhase,
+ RetagKind::Raw => stable_mir::mir::RetagKind::Raw,
+ RetagKind::Default => stable_mir::mir::RetagKind::Default,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::UserTypeAnnotationIndex {
+ type T = usize;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ self.as_usize()
+ }
+}
+
+impl<'tcx> Stable<'tcx> for CodeRegion {
+ type T = stable_mir::mir::CodeRegion;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ stable_mir::mir::CodeRegion {
+ file_name: self.file_name.as_str().to_string(),
+ start_line: self.start_line as usize,
+ start_col: self.start_col as usize,
+ end_line: self.end_line as usize,
+ end_col: self.end_col as usize,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::UnwindAction {
type T = stable_mir::mir::UnwindAction;
- fn stable(&self) -> Self::T {
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
use rustc_middle::mir::UnwindAction;
match self {
UnwindAction::Continue => stable_mir::mir::UnwindAction::Continue,
@@ -229,42 +589,68 @@ impl Stable for mir::UnwindAction {
}
}
-fn rustc_assert_msg_to_msg<'tcx>(
- assert_message: &rustc_middle::mir::AssertMessage<'tcx>,
-) -> stable_mir::mir::AssertMessage {
- use rustc_middle::mir::AssertKind;
- match assert_message {
- AssertKind::BoundsCheck { len, index } => {
- stable_mir::mir::AssertMessage::BoundsCheck { len: len.stable(), index: index.stable() }
- }
- AssertKind::Overflow(bin_op, op1, op2) => {
- stable_mir::mir::AssertMessage::Overflow(bin_op.stable(), op1.stable(), op2.stable())
- }
- AssertKind::OverflowNeg(op) => stable_mir::mir::AssertMessage::OverflowNeg(op.stable()),
- AssertKind::DivisionByZero(op) => {
- stable_mir::mir::AssertMessage::DivisionByZero(op.stable())
- }
- AssertKind::RemainderByZero(op) => {
- stable_mir::mir::AssertMessage::RemainderByZero(op.stable())
- }
- AssertKind::ResumedAfterReturn(generator) => {
- stable_mir::mir::AssertMessage::ResumedAfterReturn(generator.stable())
- }
- AssertKind::ResumedAfterPanic(generator) => {
- stable_mir::mir::AssertMessage::ResumedAfterPanic(generator.stable())
+impl<'tcx> Stable<'tcx> for mir::NonDivergingIntrinsic<'tcx> {
+ type T = stable_mir::mir::NonDivergingIntrinsic;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use rustc_middle::mir::NonDivergingIntrinsic;
+ match self {
+ NonDivergingIntrinsic::Assume(op) => {
+ stable_mir::mir::NonDivergingIntrinsic::Assume(op.stable(tables))
+ }
+ NonDivergingIntrinsic::CopyNonOverlapping(copy_non_overlapping) => {
+ stable_mir::mir::NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping {
+ src: copy_non_overlapping.src.stable(tables),
+ dst: copy_non_overlapping.dst.stable(tables),
+ count: copy_non_overlapping.count.stable(tables),
+ })
+ }
}
- AssertKind::MisalignedPointerDereference { required, found } => {
- stable_mir::mir::AssertMessage::MisalignedPointerDereference {
- required: required.stable(),
- found: found.stable(),
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::AssertMessage<'tcx> {
+ type T = stable_mir::mir::AssertMessage;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use rustc_middle::mir::AssertKind;
+ match self {
+ AssertKind::BoundsCheck { len, index } => stable_mir::mir::AssertMessage::BoundsCheck {
+ len: len.stable(tables),
+ index: index.stable(tables),
+ },
+ AssertKind::Overflow(bin_op, op1, op2) => stable_mir::mir::AssertMessage::Overflow(
+ bin_op.stable(tables),
+ op1.stable(tables),
+ op2.stable(tables),
+ ),
+ AssertKind::OverflowNeg(op) => {
+ stable_mir::mir::AssertMessage::OverflowNeg(op.stable(tables))
+ }
+ AssertKind::DivisionByZero(op) => {
+ stable_mir::mir::AssertMessage::DivisionByZero(op.stable(tables))
+ }
+ AssertKind::RemainderByZero(op) => {
+ stable_mir::mir::AssertMessage::RemainderByZero(op.stable(tables))
+ }
+ AssertKind::ResumedAfterReturn(generator) => {
+ stable_mir::mir::AssertMessage::ResumedAfterReturn(generator.stable(tables))
+ }
+ AssertKind::ResumedAfterPanic(generator) => {
+ stable_mir::mir::AssertMessage::ResumedAfterPanic(generator.stable(tables))
+ }
+ AssertKind::MisalignedPointerDereference { required, found } => {
+ stable_mir::mir::AssertMessage::MisalignedPointerDereference {
+ required: required.stable(tables),
+ found: found.stable(tables),
+ }
}
}
}
}
-impl Stable for mir::BinOp {
+impl<'tcx> Stable<'tcx> for mir::BinOp {
type T = stable_mir::mir::BinOp;
- fn stable(&self) -> Self::T {
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
use mir::BinOp;
match self {
BinOp::Add => stable_mir::mir::BinOp::Add,
@@ -293,9 +679,9 @@ impl Stable for mir::BinOp {
}
}
-impl Stable for mir::UnOp {
+impl<'tcx> Stable<'tcx> for mir::UnOp {
type T = stable_mir::mir::UnOp;
- fn stable(&self) -> Self::T {
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
use mir::UnOp;
match self {
UnOp::Not => stable_mir::mir::UnOp::Not,
@@ -304,9 +690,43 @@ impl Stable for mir::UnOp {
}
}
-impl Stable for rustc_hir::GeneratorKind {
+impl<'tcx> Stable<'tcx> for mir::AggregateKind<'tcx> {
+ type T = stable_mir::mir::AggregateKind;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ match self {
+ mir::AggregateKind::Array(ty) => {
+ stable_mir::mir::AggregateKind::Array(tables.intern_ty(*ty))
+ }
+ mir::AggregateKind::Tuple => stable_mir::mir::AggregateKind::Tuple,
+ mir::AggregateKind::Adt(def_id, var_idx, generic_arg, user_ty_index, field_idx) => {
+ stable_mir::mir::AggregateKind::Adt(
+ rustc_internal::adt_def(*def_id),
+ var_idx.index(),
+ generic_arg.stable(tables),
+ user_ty_index.map(|idx| idx.index()),
+ field_idx.map(|idx| idx.index()),
+ )
+ }
+ mir::AggregateKind::Closure(def_id, generic_arg) => {
+ stable_mir::mir::AggregateKind::Closure(
+ rustc_internal::closure_def(*def_id),
+ generic_arg.stable(tables),
+ )
+ }
+ mir::AggregateKind::Generator(def_id, generic_arg, movability) => {
+ stable_mir::mir::AggregateKind::Generator(
+ rustc_internal::generator_def(*def_id),
+ generic_arg.stable(tables),
+ movability.stable(tables),
+ )
+ }
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for rustc_hir::GeneratorKind {
type T = stable_mir::mir::GeneratorKind;
- fn stable(&self) -> Self::T {
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
use rustc_hir::{AsyncGeneratorKind, GeneratorKind};
match self {
GeneratorKind::Async(async_gen) => {
@@ -322,35 +742,35 @@ impl Stable for rustc_hir::GeneratorKind {
}
}
-impl<'tcx> Stable for mir::InlineAsmOperand<'tcx> {
+impl<'tcx> Stable<'tcx> for mir::InlineAsmOperand<'tcx> {
type T = stable_mir::mir::InlineAsmOperand;
- fn stable(&self) -> Self::T {
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
use rustc_middle::mir::InlineAsmOperand;
let (in_value, out_place) = match self {
- InlineAsmOperand::In { value, .. } => (Some(value.stable()), None),
- InlineAsmOperand::Out { place, .. } => (None, place.map(|place| place.stable())),
+ InlineAsmOperand::In { value, .. } => (Some(value.stable(tables)), None),
+ InlineAsmOperand::Out { place, .. } => (None, place.map(|place| place.stable(tables))),
InlineAsmOperand::InOut { in_value, out_place, .. } => {
- (Some(in_value.stable()), out_place.map(|place| place.stable()))
+ (Some(in_value.stable(tables)), out_place.map(|place| place.stable(tables)))
}
InlineAsmOperand::Const { .. }
| InlineAsmOperand::SymFn { .. }
| InlineAsmOperand::SymStatic { .. } => (None, None),
};
- stable_mir::mir::InlineAsmOperand { in_value, out_place, raw_rpr: format!("{:?}", self) }
+ stable_mir::mir::InlineAsmOperand { in_value, out_place, raw_rpr: format!("{self:?}") }
}
}
-impl<'tcx> Stable for mir::Terminator<'tcx> {
+impl<'tcx> Stable<'tcx> for mir::Terminator<'tcx> {
type T = stable_mir::mir::Terminator;
- fn stable(&self) -> Self::T {
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
use rustc_middle::mir::TerminatorKind::*;
use stable_mir::mir::Terminator;
match &self.kind {
Goto { target } => Terminator::Goto { target: target.as_usize() },
SwitchInt { discr, targets } => Terminator::SwitchInt {
- discr: discr.stable(),
+ discr: discr.stable(tables),
targets: targets
.iter()
.map(|(value, target)| stable_mir::mir::SwitchTarget {
@@ -365,37 +785,423 @@ impl<'tcx> Stable for mir::Terminator<'tcx> {
Return => Terminator::Return,
Unreachable => Terminator::Unreachable,
Drop { place, target, unwind, replace: _ } => Terminator::Drop {
- place: place.stable(),
+ place: place.stable(tables),
target: target.as_usize(),
- unwind: unwind.stable(),
+ unwind: unwind.stable(tables),
},
Call { func, args, destination, target, unwind, call_source: _, fn_span: _ } => {
Terminator::Call {
- func: func.stable(),
- args: args.iter().map(|arg| arg.stable()).collect(),
- destination: destination.stable(),
+ func: func.stable(tables),
+ args: args.iter().map(|arg| arg.stable(tables)).collect(),
+ destination: destination.stable(tables),
target: target.map(|t| t.as_usize()),
- unwind: unwind.stable(),
+ unwind: unwind.stable(tables),
}
}
Assert { cond, expected, msg, target, unwind } => Terminator::Assert {
- cond: cond.stable(),
+ cond: cond.stable(tables),
expected: *expected,
- msg: rustc_assert_msg_to_msg(msg),
+ msg: msg.stable(tables),
target: target.as_usize(),
- unwind: unwind.stable(),
+ unwind: unwind.stable(tables),
},
InlineAsm { template, operands, options, line_spans, destination, unwind } => {
Terminator::InlineAsm {
- template: format!("{:?}", template),
- operands: operands.iter().map(|operand| operand.stable()).collect(),
- options: format!("{:?}", options),
- line_spans: format!("{:?}", line_spans),
+ template: format!("{template:?}"),
+ operands: operands.iter().map(|operand| operand.stable(tables)).collect(),
+ options: format!("{options:?}"),
+ line_spans: format!("{line_spans:?}"),
destination: destination.map(|d| d.as_usize()),
- unwind: unwind.stable(),
+ unwind: unwind.stable(tables),
}
}
Yield { .. } | GeneratorDrop | FalseEdge { .. } | FalseUnwind { .. } => unreachable!(),
}
}
}
+
+impl<'tcx> Stable<'tcx> for ty::GenericArgs<'tcx> {
+ type T = stable_mir::ty::GenericArgs;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::GenericArgs;
+
+ GenericArgs(self.iter().map(|arg| arg.unpack().stable(tables)).collect())
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::GenericArgKind<'tcx> {
+ type T = stable_mir::ty::GenericArgKind;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::GenericArgKind;
+ match self {
+ ty::GenericArgKind::Lifetime(region) => GenericArgKind::Lifetime(opaque(region)),
+ ty::GenericArgKind::Type(ty) => GenericArgKind::Type(tables.intern_ty(*ty)),
+ ty::GenericArgKind::Const(cnst) => {
+ let cnst = ConstantKind::from_const(*cnst, tables.tcx);
+ GenericArgKind::Const(stable_mir::ty::Const { literal: cnst.stable(tables) })
+ }
+ }
+ }
+}
+
+impl<'tcx, S, V> Stable<'tcx> for ty::Binder<'tcx, S>
+where
+ S: Stable<'tcx, T = V>,
+{
+ type T = stable_mir::ty::Binder<V>;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::Binder;
+
+ Binder {
+ value: self.as_ref().skip_binder().stable(tables),
+ bound_vars: self
+ .bound_vars()
+ .iter()
+ .map(|bound_var| bound_var.stable(tables))
+ .collect(),
+ }
+ }
+}
+
+impl<'tcx, S, V> Stable<'tcx> for ty::EarlyBinder<S>
+where
+ S: Stable<'tcx, T = V>,
+{
+ type T = stable_mir::ty::EarlyBinder<V>;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::EarlyBinder;
+
+ EarlyBinder { value: self.as_ref().skip_binder().stable(tables) }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::FnSig<'tcx> {
+ type T = stable_mir::ty::FnSig;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use rustc_target::spec::abi;
+ use stable_mir::ty::{Abi, FnSig};
+
+ FnSig {
+ inputs_and_output: self
+ .inputs_and_output
+ .iter()
+ .map(|ty| tables.intern_ty(ty))
+ .collect(),
+ c_variadic: self.c_variadic,
+ unsafety: self.unsafety.stable(tables),
+ abi: match self.abi {
+ abi::Abi::Rust => Abi::Rust,
+ abi::Abi::C { unwind } => Abi::C { unwind },
+ abi::Abi::Cdecl { unwind } => Abi::Cdecl { unwind },
+ abi::Abi::Stdcall { unwind } => Abi::Stdcall { unwind },
+ abi::Abi::Fastcall { unwind } => Abi::Fastcall { unwind },
+ abi::Abi::Vectorcall { unwind } => Abi::Vectorcall { unwind },
+ abi::Abi::Thiscall { unwind } => Abi::Thiscall { unwind },
+ abi::Abi::Aapcs { unwind } => Abi::Aapcs { unwind },
+ abi::Abi::Win64 { unwind } => Abi::Win64 { unwind },
+ abi::Abi::SysV64 { unwind } => Abi::SysV64 { unwind },
+ abi::Abi::PtxKernel => Abi::PtxKernel,
+ abi::Abi::Msp430Interrupt => Abi::Msp430Interrupt,
+ abi::Abi::X86Interrupt => Abi::X86Interrupt,
+ abi::Abi::AmdGpuKernel => Abi::AmdGpuKernel,
+ abi::Abi::EfiApi => Abi::EfiApi,
+ abi::Abi::AvrInterrupt => Abi::AvrInterrupt,
+ abi::Abi::AvrNonBlockingInterrupt => Abi::AvrNonBlockingInterrupt,
+ abi::Abi::CCmseNonSecureCall => Abi::CCmseNonSecureCall,
+ abi::Abi::Wasm => Abi::Wasm,
+ abi::Abi::System { unwind } => Abi::System { unwind },
+ abi::Abi::RustIntrinsic => Abi::RustIntrinsic,
+ abi::Abi::RustCall => Abi::RustCall,
+ abi::Abi::PlatformIntrinsic => Abi::PlatformIntrinsic,
+ abi::Abi::Unadjusted => Abi::Unadjusted,
+ abi::Abi::RustCold => Abi::RustCold,
+ abi::Abi::RiscvInterruptM => Abi::RiscvInterruptM,
+ abi::Abi::RiscvInterruptS => Abi::RiscvInterruptS,
+ },
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::BoundTyKind {
+ type T = stable_mir::ty::BoundTyKind;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::BoundTyKind;
+
+ match self {
+ ty::BoundTyKind::Anon => BoundTyKind::Anon,
+ ty::BoundTyKind::Param(def_id, symbol) => {
+ BoundTyKind::Param(rustc_internal::param_def(*def_id), symbol.to_string())
+ }
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::BoundRegionKind {
+ type T = stable_mir::ty::BoundRegionKind;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::BoundRegionKind;
+
+ match self {
+ ty::BoundRegionKind::BrAnon(option_span) => {
+ BoundRegionKind::BrAnon(option_span.map(|span| opaque(&span)))
+ }
+ ty::BoundRegionKind::BrNamed(def_id, symbol) => {
+ BoundRegionKind::BrNamed(rustc_internal::br_named_def(*def_id), symbol.to_string())
+ }
+ ty::BoundRegionKind::BrEnv => BoundRegionKind::BrEnv,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::BoundVariableKind {
+ type T = stable_mir::ty::BoundVariableKind;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::BoundVariableKind;
+
+ match self {
+ ty::BoundVariableKind::Ty(bound_ty_kind) => {
+ BoundVariableKind::Ty(bound_ty_kind.stable(tables))
+ }
+ ty::BoundVariableKind::Region(bound_region_kind) => {
+ BoundVariableKind::Region(bound_region_kind.stable(tables))
+ }
+ ty::BoundVariableKind::Const => BoundVariableKind::Const,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::IntTy {
+ type T = IntTy;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ match self {
+ ty::IntTy::Isize => IntTy::Isize,
+ ty::IntTy::I8 => IntTy::I8,
+ ty::IntTy::I16 => IntTy::I16,
+ ty::IntTy::I32 => IntTy::I32,
+ ty::IntTy::I64 => IntTy::I64,
+ ty::IntTy::I128 => IntTy::I128,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::UintTy {
+ type T = UintTy;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ match self {
+ ty::UintTy::Usize => UintTy::Usize,
+ ty::UintTy::U8 => UintTy::U8,
+ ty::UintTy::U16 => UintTy::U16,
+ ty::UintTy::U32 => UintTy::U32,
+ ty::UintTy::U64 => UintTy::U64,
+ ty::UintTy::U128 => UintTy::U128,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::FloatTy {
+ type T = FloatTy;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ match self {
+ ty::FloatTy::F32 => FloatTy::F32,
+ ty::FloatTy::F64 => FloatTy::F64,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for hir::Movability {
+ type T = Movability;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ match self {
+ hir::Movability::Static => Movability::Static,
+ hir::Movability::Movable => Movability::Movable,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for Ty<'tcx> {
+ type T = stable_mir::ty::TyKind;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ match self.kind() {
+ ty::Bool => TyKind::RigidTy(RigidTy::Bool),
+ ty::Char => TyKind::RigidTy(RigidTy::Char),
+ ty::Int(int_ty) => TyKind::RigidTy(RigidTy::Int(int_ty.stable(tables))),
+ ty::Uint(uint_ty) => TyKind::RigidTy(RigidTy::Uint(uint_ty.stable(tables))),
+ ty::Float(float_ty) => TyKind::RigidTy(RigidTy::Float(float_ty.stable(tables))),
+ ty::Adt(adt_def, generic_args) => TyKind::RigidTy(RigidTy::Adt(
+ rustc_internal::adt_def(adt_def.did()),
+ generic_args.stable(tables),
+ )),
+ ty::Foreign(def_id) => {
+ TyKind::RigidTy(RigidTy::Foreign(rustc_internal::foreign_def(*def_id)))
+ }
+ ty::Str => TyKind::RigidTy(RigidTy::Str),
+ ty::Array(ty, constant) => {
+ let cnst = ConstantKind::from_const(*constant, tables.tcx);
+ let cnst = stable_mir::ty::Const { literal: cnst.stable(tables) };
+ TyKind::RigidTy(RigidTy::Array(tables.intern_ty(*ty), cnst))
+ }
+ ty::Slice(ty) => TyKind::RigidTy(RigidTy::Slice(tables.intern_ty(*ty))),
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => {
+ TyKind::RigidTy(RigidTy::RawPtr(tables.intern_ty(*ty), mutbl.stable(tables)))
+ }
+ ty::Ref(region, ty, mutbl) => TyKind::RigidTy(RigidTy::Ref(
+ opaque(region),
+ tables.intern_ty(*ty),
+ mutbl.stable(tables),
+ )),
+ ty::FnDef(def_id, generic_args) => TyKind::RigidTy(RigidTy::FnDef(
+ rustc_internal::fn_def(*def_id),
+ generic_args.stable(tables),
+ )),
+ ty::FnPtr(poly_fn_sig) => TyKind::RigidTy(RigidTy::FnPtr(poly_fn_sig.stable(tables))),
+ ty::Dynamic(existential_predicates, region, dyn_kind) => {
+ TyKind::RigidTy(RigidTy::Dynamic(
+ existential_predicates
+ .iter()
+ .map(|existential_predicate| existential_predicate.stable(tables))
+ .collect(),
+ opaque(region),
+ dyn_kind.stable(tables),
+ ))
+ }
+ ty::Closure(def_id, generic_args) => TyKind::RigidTy(RigidTy::Closure(
+ rustc_internal::closure_def(*def_id),
+ generic_args.stable(tables),
+ )),
+ ty::Generator(def_id, generic_args, movability) => TyKind::RigidTy(RigidTy::Generator(
+ rustc_internal::generator_def(*def_id),
+ generic_args.stable(tables),
+ movability.stable(tables),
+ )),
+ ty::Never => TyKind::RigidTy(RigidTy::Never),
+ ty::Tuple(fields) => TyKind::RigidTy(RigidTy::Tuple(
+ fields.iter().map(|ty| tables.intern_ty(ty)).collect(),
+ )),
+ ty::Alias(alias_kind, alias_ty) => {
+ TyKind::Alias(alias_kind.stable(tables), alias_ty.stable(tables))
+ }
+ ty::Param(param_ty) => TyKind::Param(param_ty.stable(tables)),
+ ty::Bound(debruijn_idx, bound_ty) => {
+ TyKind::Bound(debruijn_idx.as_usize(), bound_ty.stable(tables))
+ }
+ ty::Placeholder(..)
+ | ty::GeneratorWitness(_)
+ | ty::GeneratorWitnessMIR(_, _)
+ | ty::Infer(_)
+ | ty::Error(_) => {
+ unreachable!();
+ }
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::ParamTy {
+ type T = stable_mir::ty::ParamTy;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::ParamTy;
+ ParamTy { index: self.index, name: self.name.to_string() }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::BoundTy {
+ type T = stable_mir::ty::BoundTy;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::BoundTy;
+ BoundTy { var: self.var.as_usize(), kind: self.kind.stable(tables) }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::interpret::Allocation {
+ type T = stable_mir::ty::Allocation;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ allocation_filter(self, alloc_range(rustc_target::abi::Size::ZERO, self.size()), tables)
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::trait_def::TraitSpecializationKind {
+ type T = stable_mir::ty::TraitSpecializationKind;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::TraitSpecializationKind;
+
+ match self {
+ ty::trait_def::TraitSpecializationKind::None => TraitSpecializationKind::None,
+ ty::trait_def::TraitSpecializationKind::Marker => TraitSpecializationKind::Marker,
+ ty::trait_def::TraitSpecializationKind::AlwaysApplicable => {
+ TraitSpecializationKind::AlwaysApplicable
+ }
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::TraitDef {
+ type T = stable_mir::ty::TraitDecl;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::TraitDecl;
+
+ TraitDecl {
+ def_id: rustc_internal::trait_def(self.def_id),
+ unsafety: self.unsafety.stable(tables),
+ paren_sugar: self.paren_sugar,
+ has_auto_impl: self.has_auto_impl,
+ is_marker: self.is_marker,
+ is_coinductive: self.is_coinductive,
+ skip_array_during_method_dispatch: self.skip_array_during_method_dispatch,
+ specialization_kind: self.specialization_kind.stable(tables),
+ must_implement_one_of: self
+ .must_implement_one_of
+ .as_ref()
+ .map(|idents| idents.iter().map(|ident| opaque(ident)).collect()),
+ implement_via_object: self.implement_via_object,
+ deny_explicit_impl: self.deny_explicit_impl,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for rustc_middle::mir::ConstantKind<'tcx> {
+ type T = stable_mir::ty::ConstantKind;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ match self {
+ ConstantKind::Ty(c) => match c.kind() {
+ ty::Value(val) => {
+ let const_val = tables.tcx.valtree_to_const_val((c.ty(), val));
+ stable_mir::ty::ConstantKind::Allocated(new_allocation(self, const_val, tables))
+ }
+ ty::ParamCt(param) => stable_mir::ty::ConstantKind::ParamCt(opaque(&param)),
+ ty::ErrorCt(_) => unreachable!(),
+ _ => unimplemented!(),
+ },
+ ConstantKind::Unevaluated(unev_const, ty) => {
+ stable_mir::ty::ConstantKind::Unevaluated(stable_mir::ty::UnevaluatedConst {
+ ty: tables.intern_ty(*ty),
+ def: tables.const_def(unev_const.def),
+ args: unev_const.args.stable(tables),
+ promoted: unev_const.promoted.map(|u| u.as_u32()),
+ })
+ }
+ ConstantKind::Val(val, _) => {
+ stable_mir::ty::ConstantKind::Allocated(new_allocation(self, *val, tables))
+ }
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::TraitRef<'tcx> {
+ type T = stable_mir::ty::TraitRef;
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::TraitRef;
+
+ TraitRef { def_id: rustc_internal::trait_def(self.def_id), args: self.args.stable(tables) }
+ }
+}
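
The `Stable` implementations above all follow one mechanical shape: match on the internal value, call `stable(tables)` recursively on each field, and touch `tables` only where a type must be interned or a def-id registered. The listing below is a standalone sketch of that shape, not part of the patch; `Tables`, `InternalMutability`, and `StableMutability` are simplified stand-ins invented for illustration.

    // Standalone illustration of the conversion pattern used by the `Stable` impls above.
    // Everything here is a simplified stand-in; in the real code `Tables` carries the
    // TyCtxt plus the interned types and def-ids.
    struct Tables;

    trait Stable {
        type T;
        fn stable(&self, tables: &mut Tables) -> Self::T;
    }

    // "Internal" representation.
    enum InternalMutability {
        Not,
        Mut,
    }

    // "Stable" representation.
    #[derive(Debug)]
    enum StableMutability {
        Not,
        Mut,
    }

    impl Stable for InternalMutability {
        type T = StableMutability;
        fn stable(&self, _: &mut Tables) -> Self::T {
            // Leaf conversions ignore `tables`; composite ones thread it through so that
            // nested fields can convert themselves the same way.
            match self {
                InternalMutability::Not => StableMutability::Not,
                InternalMutability::Mut => StableMutability::Mut,
            }
        }
    }

    fn main() {
        let mut tables = Tables;
        println!("{:?}", InternalMutability::Mut.stable(&mut tables));
    }
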
diff --git a/compiler/rustc_smir/src/stable_mir/mir/body.rs b/compiler/rustc_smir/src/stable_mir/mir/body.rs
index 468e915d1..c16bd6cbd 100644
--- a/compiler/rustc_smir/src/stable_mir/mir/body.rs
+++ b/compiler/rustc_smir/src/stable_mir/mir/body.rs
@@ -1,4 +1,8 @@
-use crate::stable_mir::ty::Ty;
+use crate::rustc_internal::Opaque;
+use crate::stable_mir::ty::{
+ AdtDef, ClosureDef, Const, GeneratorDef, GenericArgs, Movability, Region,
+};
+use crate::stable_mir::{self, ty::Ty};
#[derive(Clone, Debug)]
pub struct Body {
@@ -130,18 +134,225 @@ pub enum AsyncGeneratorKind {
Fn,
}
+pub(crate) type LocalDefId = Opaque;
+pub(crate) type CounterValueReference = Opaque;
+pub(crate) type InjectedExpressionId = Opaque;
+pub(crate) type ExpressionOperandId = Opaque;
+
+/// The FakeReadCause describes the kind of pattern that gives rise to a FakeRead statement.
+#[derive(Clone, Debug)]
+pub enum FakeReadCause {
+ ForMatchGuard,
+ ForMatchedPlace(LocalDefId),
+ ForGuardBinding,
+ ForLet(LocalDefId),
+ ForIndex,
+}
+
+/// Describes what kind of retag is to be performed.
+#[derive(Clone, Debug)]
+pub enum RetagKind {
+ FnEntry,
+ TwoPhase,
+ Raw,
+ Default,
+}
+
+#[derive(Clone, Debug)]
+pub enum Variance {
+ Covariant,
+ Invariant,
+ Contravariant,
+ Bivariant,
+}
+
+#[derive(Clone, Debug)]
+pub enum Op {
+ Subtract,
+ Add,
+}
+
+#[derive(Clone, Debug)]
+pub enum CoverageKind {
+ Counter {
+ function_source_hash: usize,
+ id: CounterValueReference,
+ },
+ Expression {
+ id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ },
+ Unreachable,
+}
+
+#[derive(Clone, Debug)]
+pub struct CodeRegion {
+ pub file_name: String,
+ pub start_line: usize,
+ pub start_col: usize,
+ pub end_line: usize,
+ pub end_col: usize,
+}
+
+#[derive(Clone, Debug)]
+pub struct Coverage {
+ pub kind: CoverageKind,
+ pub code_region: Option<CodeRegion>,
+}
+
+#[derive(Clone, Debug)]
+pub struct CopyNonOverlapping {
+ pub src: Operand,
+ pub dst: Operand,
+ pub count: Operand,
+}
+
+#[derive(Clone, Debug)]
+pub enum NonDivergingIntrinsic {
+ Assume(Operand),
+ CopyNonOverlapping(CopyNonOverlapping),
+}
+
#[derive(Clone, Debug)]
pub enum Statement {
Assign(Place, Rvalue),
+ FakeRead(FakeReadCause, Place),
+ SetDiscriminant { place: Place, variant_index: VariantIdx },
+ Deinit(Place),
+ StorageLive(Local),
+ StorageDead(Local),
+ Retag(RetagKind, Place),
+ PlaceMention(Place),
+ AscribeUserType { place: Place, projections: UserTypeProjection, variance: Variance },
+ Coverage(Coverage),
+ Intrinsic(NonDivergingIntrinsic),
+ ConstEvalCounter,
Nop,
}
-// FIXME this is incomplete
#[derive(Clone, Debug)]
pub enum Rvalue {
- Use(Operand),
+ /// Creates a pointer with the indicated mutability to the place.
+ ///
+    /// This is generated by pointer casts like `&v as *const _` or raw address-of expressions like
+ /// `&raw v` or `addr_of!(v)`.
+ AddressOf(Mutability, Place),
+
+ /// Creates an aggregate value, like a tuple or struct.
+ ///
+ /// This is needed because dataflow analysis needs to distinguish
+ /// `dest = Foo { x: ..., y: ... }` from `dest.x = ...; dest.y = ...;` in the case that `Foo`
+ /// has a destructor.
+ ///
+ /// Disallowed after deaggregation for all aggregate kinds except `Array` and `Generator`. After
+ /// generator lowering, `Generator` aggregate kinds are disallowed too.
+ Aggregate(AggregateKind, Vec<Operand>),
+
+ /// * `Offset` has the same semantics as [`offset`](pointer::offset), except that the second
+ /// parameter may be a `usize` as well.
+ /// * The comparison operations accept `bool`s, `char`s, signed or unsigned integers, floats,
+ /// raw pointers, or function pointers and return a `bool`. The types of the operands must be
+ /// matching, up to the usual caveat of the lifetimes in function pointers.
+ /// * Left and right shift operations accept signed or unsigned integers not necessarily of the
+ /// same type and return a value of the same type as their LHS. Like in Rust, the RHS is
+ /// truncated as needed.
+ /// * The `Bit*` operations accept signed integers, unsigned integers, or bools with matching
+ /// types and return a value of that type.
+ /// * The remaining operations accept signed integers, unsigned integers, or floats with
+ /// matching types and return a value of that type.
+ BinaryOp(BinOp, Operand, Operand),
+
+ /// Performs essentially all of the casts that can be performed via `as`.
+ ///
+ /// This allows for casts from/to a variety of types.
+ Cast(CastKind, Operand, Ty),
+
+ /// Same as `BinaryOp`, but yields `(T, bool)` with a `bool` indicating an error condition.
+ ///
+ /// For addition, subtraction, and multiplication on integers the error condition is set when
+ /// the infinite precision result would not be equal to the actual result.
CheckedBinaryOp(BinOp, Operand, Operand),
+
+ /// A CopyForDeref is equivalent to a read from a place.
+ /// When such a read happens, it is guaranteed that the only use of the returned value is a
+ /// deref operation, immediately followed by one or more projections.
+ CopyForDeref(Place),
+
+ /// Computes the discriminant of the place, returning it as an integer of type
+ /// [`discriminant_ty`]. Returns zero for types without discriminant.
+ ///
+ /// The validity requirements for the underlying value are undecided for this rvalue, see
+ /// [#91095]. Note too that the value of the discriminant is not the same thing as the
+ /// variant index; use [`discriminant_for_variant`] to convert.
+ ///
+ /// [`discriminant_ty`]: rustc_middle::ty::Ty::discriminant_ty
+ /// [#91095]: https://github.com/rust-lang/rust/issues/91095
+ /// [`discriminant_for_variant`]: rustc_middle::ty::Ty::discriminant_for_variant
+ Discriminant(Place),
+
+ /// Yields the length of the place, as a `usize`.
+ ///
+ /// If the type of the place is an array, this is the array length. For slices (`[T]`, not
+ /// `&[T]`) this accesses the place's metadata to determine the length. This rvalue is
+ /// ill-formed for places of other types.
+ Len(Place),
+
+ /// Creates a reference to the place.
+ Ref(Region, BorrowKind, Place),
+
+ /// Creates an array where each element is the value of the operand.
+ ///
+ /// This is the cause of a bug in the case where the repetition count is zero because the value
+ /// is not dropped, see [#74836].
+ ///
+ /// Corresponds to source code like `[x; 32]`.
+ ///
+ /// [#74836]: https://github.com/rust-lang/rust/issues/74836
+ Repeat(Operand, Const),
+
+ /// Transmutes a `*mut u8` into shallow-initialized `Box<T>`.
+ ///
+ /// This is different from a normal transmute because dataflow analysis will treat the box as
+ /// initialized but its content as uninitialized. Like other pointer casts, this in general
+ /// affects alias analysis.
+ ShallowInitBox(Operand, Ty),
+
+ /// Creates a pointer/reference to the given thread local.
+ ///
+ /// The yielded type is a `*mut T` if the static is mutable, otherwise if the static is extern a
+ /// `*const T`, and if neither of those apply a `&T`.
+ ///
+ /// **Note:** This is a runtime operation that actually executes code and is in this sense more
+ /// like a function call. Also, eliminating dead stores of this rvalue causes `fn main() {}` to
+ /// SIGILL for some reason that I (JakobDegen) never got a chance to look into.
+ ///
+ /// **Needs clarification**: Are there weird additional semantics here related to the runtime
+ /// nature of this operation?
+ ThreadLocalRef(stable_mir::CrateItem),
+
+ /// Computes a value as described by the operation.
+ NullaryOp(NullOp, Ty),
+
+    /// Exactly like `BinaryOp`, but with fewer operands.
+ ///
+ /// Also does two's-complement arithmetic. Negation requires a signed integer or a float;
+ /// bitwise not requires a signed integer, unsigned integer, or bool. Both operation kinds
+ /// return a value with the same type as their operand.
UnaryOp(UnOp, Operand),
+
+    /// Yields the operand unchanged.
+ Use(Operand),
+}
+
+#[derive(Clone, Debug)]
+pub enum AggregateKind {
+ Array(Ty),
+ Tuple,
+ Adt(AdtDef, VariantIdx, GenericArgs, Option<UserTypeAnnotationIndex>, Option<FieldIdx>),
+ Closure(ClosureDef, GenericArgs),
+ Generator(GeneratorDef, GenericArgs, Movability),
}
#[derive(Clone, Debug)]
@@ -153,12 +364,115 @@ pub enum Operand {
#[derive(Clone, Debug)]
pub struct Place {
- pub local: usize,
+ pub local: Local,
pub projection: String,
}
#[derive(Clone, Debug)]
+pub struct UserTypeProjection {
+ pub base: UserTypeAnnotationIndex,
+ pub projection: String,
+}
+
+pub type Local = usize;
+
+type FieldIdx = usize;
+
+/// The source-order index of a variant in a type.
+pub type VariantIdx = usize;
+
+type UserTypeAnnotationIndex = usize;
+
+#[derive(Clone, Debug)]
pub struct SwitchTarget {
pub value: u128,
pub target: usize,
}
+
+#[derive(Clone, Debug)]
+pub enum BorrowKind {
+ /// Data must be immutable and is aliasable.
+ Shared,
+
+ /// The immediately borrowed place must be immutable, but projections from
+ /// it don't need to be. For example, a shallow borrow of `a.b` doesn't
+ /// conflict with a mutable borrow of `a.b.c`.
+ Shallow,
+
+ /// Data is mutable and not aliasable.
+ Mut {
+        /// The kind of mutable borrow (default, two-phase, or closure capture).
+ kind: MutBorrowKind,
+ },
+}
+
+#[derive(Clone, Debug)]
+pub enum MutBorrowKind {
+ Default,
+ TwoPhaseBorrow,
+ ClosureCapture,
+}
+
+#[derive(Clone, Debug)]
+pub enum Mutability {
+ Not,
+ Mut,
+}
+
+#[derive(Clone, Debug)]
+pub enum Safety {
+ Unsafe,
+ Normal,
+}
+
+#[derive(Clone, Debug)]
+pub enum PointerCoercion {
+ /// Go from a fn-item type to a fn-pointer type.
+ ReifyFnPointer,
+
+ /// Go from a safe fn pointer to an unsafe fn pointer.
+ UnsafeFnPointer,
+
+ /// Go from a non-capturing closure to an fn pointer or an unsafe fn pointer.
+ /// It cannot convert a closure that requires unsafe.
+ ClosureFnPointer(Safety),
+
+ /// Go from a mut raw pointer to a const raw pointer.
+ MutToConstPointer,
+
+    /// Go from `*const [T; N]` to `*const T`.
+ ArrayToPointer,
+
+ /// Unsize a pointer/reference value, e.g., `&[T; n]` to
+ /// `&[T]`. Note that the source could be a thin or fat pointer.
+ /// This will do things like convert thin pointers to fat
+ /// pointers, or convert structs containing thin pointers to
+ /// structs containing fat pointers, or convert between fat
+ /// pointers.
+ Unsize,
+}
+
+#[derive(Clone, Debug)]
+pub enum CastKind {
+ PointerExposeAddress,
+ PointerFromExposedAddress,
+ PointerCoercion(PointerCoercion),
+ DynStar,
+ IntToInt,
+ FloatToInt,
+ FloatToFloat,
+ IntToFloat,
+ PtrToPtr,
+ FnPtrToPtr,
+ Transmute,
+}
+
+#[derive(Clone, Debug)]
+pub enum NullOp {
+ /// Returns the size of a value of that type.
+ SizeOf,
+ /// Returns the minimum alignment of a type.
+ AlignOf,
+ /// Returns the offset of a field.
+ OffsetOf(Vec<FieldIdx>),
+}
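
The `Statement` and `Rvalue` variants introduced above are plain data, so consumers can traverse a body with ordinary pattern matching. The sketch below is self-contained and uses simplified stand-ins for `Place`, `Operand`, `Rvalue`, and `Statement` (it is not the stable_mir crate itself); it only illustrates how such a traversal might look.

    // Standalone sketch mirroring a subset of the shapes added above.
    #[derive(Clone, Debug)]
    struct Place {
        local: usize,
        projection: String,
    }

    #[derive(Clone, Debug)]
    enum Operand {
        Copy(Place),
        Move(Place),
        Constant(String),
    }

    #[derive(Clone, Debug)]
    enum Rvalue {
        Use(Operand),
    }

    #[derive(Clone, Debug)]
    enum Statement {
        Assign(Place, Rvalue),
        StorageLive(usize),
        StorageDead(usize),
        Nop,
    }

    /// Counts how many statements assign directly into local 0 (the return place).
    fn assignments_to_return_place(stmts: &[Statement]) -> usize {
        stmts
            .iter()
            .filter(|s| matches!(s, Statement::Assign(p, _) if p.local == 0))
            .count()
    }

    fn main() {
        let stmts = vec![
            Statement::StorageLive(1),
            Statement::Assign(
                Place { local: 0, projection: String::new() },
                Rvalue::Use(Operand::Constant("const 42_i32".into())),
            ),
            Statement::StorageDead(1),
            Statement::Nop,
        ];
        assert_eq!(assignments_to_return_place(&stmts), 1);
    }
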
diff --git a/compiler/rustc_smir/src/stable_mir/mod.rs b/compiler/rustc_smir/src/stable_mir/mod.rs
index 5e599a77b..19061742b 100644
--- a/compiler/rustc_smir/src/stable_mir/mod.rs
+++ b/compiler/rustc_smir/src/stable_mir/mod.rs
@@ -15,7 +15,7 @@ use std::cell::Cell;
use crate::rustc_smir::Tables;
-use self::ty::{Ty, TyKind};
+use self::ty::{ImplDef, ImplTrait, TraitDecl, TraitDef, Ty, TyKind};
pub mod mir;
pub mod ty;
@@ -32,6 +32,12 @@ pub type DefId = usize;
/// A list of crate items.
pub type CrateItems = Vec<CrateItem>;
+/// A list of trait decls.
+pub type TraitDecls = Vec<TraitDef>;
+
+/// A list of impl trait decls.
+pub type ImplTraitDecls = Vec<ImplDef>;
+
/// Holds information about a crate.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Crate {
@@ -79,11 +85,31 @@ pub fn all_local_items() -> CrateItems {
with(|cx| cx.all_local_items())
}
+pub fn all_trait_decls() -> TraitDecls {
+ with(|cx| cx.all_trait_decls())
+}
+
+pub fn trait_decl(trait_def: &TraitDef) -> TraitDecl {
+ with(|cx| cx.trait_decl(trait_def))
+}
+
+pub fn all_trait_impls() -> ImplTraitDecls {
+ with(|cx| cx.all_trait_impls())
+}
+
+pub fn trait_impl(trait_impl: &ImplDef) -> ImplTrait {
+ with(|cx| cx.trait_impl(trait_impl))
+}
+
pub trait Context {
fn entry_fn(&mut self) -> Option<CrateItem>;
/// Retrieve all items of the local crate that have a MIR associated with them.
fn all_local_items(&mut self) -> CrateItems;
fn mir_body(&mut self, item: &CrateItem) -> mir::Body;
+ fn all_trait_decls(&mut self) -> TraitDecls;
+ fn trait_decl(&mut self, trait_def: &TraitDef) -> TraitDecl;
+ fn all_trait_impls(&mut self) -> ImplTraitDecls;
+ fn trait_impl(&mut self, trait_impl: &ImplDef) -> ImplTrait;
/// Get information about the local crate.
fn local_crate(&self) -> Crate;
/// Retrieve a list of all external crates.
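
The new `Context` methods are exposed through the free functions `all_trait_decls`, `trait_decl`, `all_trait_impls`, and `trait_impl` added above. The sketch below shows how a consumer might call them; it assumes it runs inside a rustc driver where a `Context` has already been installed (that setup is not shown), and the `rustc_smir::stable_mir` import path is an assumption that depends on how the crate is pulled in.

    // Sketch only: assumes a stable_mir `Context` is already installed by the driver.
    use rustc_smir::stable_mir;

    fn report_traits() {
        for trait_def in stable_mir::all_trait_decls() {
            let decl = stable_mir::trait_decl(&trait_def);
            if decl.is_marker {
                println!("marker trait {:?}, unsafety: {:?}", decl.def_id, decl.unsafety);
            }
        }
        for impl_def in stable_mir::all_trait_impls() {
            // `trait_impl` returns an `ImplTrait`, i.e. `EarlyBinder<TraitRef>`.
            let impl_trait = stable_mir::trait_impl(&impl_def);
            println!("impl of trait {:?}", impl_trait.value.def_id);
        }
    }
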
diff --git a/compiler/rustc_smir/src/stable_mir/ty.rs b/compiler/rustc_smir/src/stable_mir/ty.rs
index 3181af46e..7a6601f09 100644
--- a/compiler/rustc_smir/src/stable_mir/ty.rs
+++ b/compiler/rustc_smir/src/stable_mir/ty.rs
@@ -1,4 +1,10 @@
-use super::with;
+use rustc_middle::mir::interpret::{alloc_range, AllocRange, ConstValue, Pointer};
+
+use super::{mir::Mutability, mir::Safety, with, DefId};
+use crate::{
+ rustc_internal::{opaque, Opaque},
+ rustc_smir::{Stable, Tables},
+};
#[derive(Copy, Clone, Debug)]
pub struct Ty(pub usize);
@@ -9,9 +15,21 @@ impl Ty {
}
}
+#[derive(Debug, Clone)]
+pub struct Const {
+ pub literal: ConstantKind,
+}
+
+type Ident = Opaque;
+pub(crate) type Region = Opaque;
+type Span = Opaque;
+
#[derive(Clone, Debug)]
pub enum TyKind {
RigidTy(RigidTy),
+ Alias(AliasKind, AliasTy),
+ Param(ParamTy),
+ Bound(usize, BoundTy),
}
#[derive(Clone, Debug)]
@@ -21,6 +39,19 @@ pub enum RigidTy {
Int(IntTy),
Uint(UintTy),
Float(FloatTy),
+ Adt(AdtDef, GenericArgs),
+ Foreign(ForeignDef),
+ Str,
+ Array(Ty, Const),
+ Slice(Ty),
+ RawPtr(Ty, Mutability),
+ Ref(Region, Ty, Mutability),
+ FnDef(FnDef, GenericArgs),
+ FnPtr(PolyFnSig),
+ Closure(ClosureDef, GenericArgs),
+ Generator(GeneratorDef, GenericArgs, Movability),
+ Dynamic(Vec<Binder<ExistentialPredicate>>, Region, DynKind),
+ Never,
Tuple(Vec<Ty>),
}
@@ -49,3 +80,384 @@ pub enum FloatTy {
F32,
F64,
}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum Movability {
+ Static,
+ Movable,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct ForeignDef(pub(crate) DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct FnDef(pub(crate) DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct ClosureDef(pub(crate) DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct GeneratorDef(pub(crate) DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct ParamDef(pub(crate) DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct BrNamedDef(pub(crate) DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct AdtDef(pub(crate) DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct AliasDef(pub(crate) DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct TraitDef(pub(crate) DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct ConstDef(pub(crate) DefId);
+
+impl TraitDef {
+ pub fn trait_decl(&self) -> TraitDecl {
+ with(|cx| cx.trait_decl(self))
+ }
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct ImplDef(pub(crate) DefId);
+
+impl ImplDef {
+ pub fn trait_impl(&self) -> ImplTrait {
+ with(|cx| cx.trait_impl(self))
+ }
+}
+
+#[derive(Clone, Debug)]
+pub struct GenericArgs(pub Vec<GenericArgKind>);
+
+#[derive(Clone, Debug)]
+pub enum GenericArgKind {
+ Lifetime(Region),
+ Type(Ty),
+ Const(Const),
+}
+
+#[derive(Clone, Debug)]
+pub enum TermKind {
+ Type(Ty),
+ Const(Const),
+}
+
+#[derive(Clone, Debug)]
+pub enum AliasKind {
+ Projection,
+ Inherent,
+ Opaque,
+ Weak,
+}
+
+#[derive(Clone, Debug)]
+pub struct AliasTy {
+ pub def_id: AliasDef,
+ pub args: GenericArgs,
+}
+
+pub type PolyFnSig = Binder<FnSig>;
+
+#[derive(Clone, Debug)]
+pub struct FnSig {
+ pub inputs_and_output: Vec<Ty>,
+ pub c_variadic: bool,
+ pub unsafety: Safety,
+ pub abi: Abi,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum Abi {
+ Rust,
+ C { unwind: bool },
+ Cdecl { unwind: bool },
+ Stdcall { unwind: bool },
+ Fastcall { unwind: bool },
+ Vectorcall { unwind: bool },
+ Thiscall { unwind: bool },
+ Aapcs { unwind: bool },
+ Win64 { unwind: bool },
+ SysV64 { unwind: bool },
+ PtxKernel,
+ Msp430Interrupt,
+ X86Interrupt,
+ AmdGpuKernel,
+ EfiApi,
+ AvrInterrupt,
+ AvrNonBlockingInterrupt,
+ CCmseNonSecureCall,
+ Wasm,
+ System { unwind: bool },
+ RustIntrinsic,
+ RustCall,
+ PlatformIntrinsic,
+ Unadjusted,
+ RustCold,
+ RiscvInterruptM,
+ RiscvInterruptS,
+}
+
+#[derive(Clone, Debug)]
+pub struct Binder<T> {
+ pub value: T,
+ pub bound_vars: Vec<BoundVariableKind>,
+}
+
+#[derive(Clone, Debug)]
+pub struct EarlyBinder<T> {
+ pub value: T,
+}
+
+#[derive(Clone, Debug)]
+pub enum BoundVariableKind {
+ Ty(BoundTyKind),
+ Region(BoundRegionKind),
+ Const,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum BoundTyKind {
+ Anon,
+ Param(ParamDef, String),
+}
+
+#[derive(Clone, Debug)]
+pub enum BoundRegionKind {
+ BrAnon(Option<Span>),
+ BrNamed(BrNamedDef, String),
+ BrEnv,
+}
+
+#[derive(Clone, Debug)]
+pub enum DynKind {
+ Dyn,
+ DynStar,
+}
+
+#[derive(Clone, Debug)]
+pub enum ExistentialPredicate {
+ Trait(ExistentialTraitRef),
+ Projection(ExistentialProjection),
+ AutoTrait(TraitDef),
+}
+
+#[derive(Clone, Debug)]
+pub struct ExistentialTraitRef {
+ pub def_id: TraitDef,
+ pub generic_args: GenericArgs,
+}
+
+#[derive(Clone, Debug)]
+pub struct ExistentialProjection {
+ pub def_id: TraitDef,
+ pub generic_args: GenericArgs,
+ pub term: TermKind,
+}
+
+#[derive(Clone, Debug)]
+pub struct ParamTy {
+ pub index: u32,
+ pub name: String,
+}
+
+#[derive(Clone, Debug)]
+pub struct BoundTy {
+ pub var: usize,
+ pub kind: BoundTyKind,
+}
+
+pub type Bytes = Vec<Option<u8>>;
+pub type Size = usize;
+pub type Prov = Opaque;
+pub type Align = u64;
+pub type Promoted = u32;
+pub type InitMaskMaterialized = Vec<u64>;
+
+/// Stores the provenance information of pointers stored in memory.
+#[derive(Clone, Debug)]
+pub struct ProvenanceMap {
+ /// Provenance in this map applies from the given offset for an entire pointer-size worth of
+ /// bytes. Two entries in this map are always at least a pointer size apart.
+ pub ptrs: Vec<(Size, Prov)>,
+}
+
+#[derive(Clone, Debug)]
+pub struct Allocation {
+ pub bytes: Bytes,
+ pub provenance: ProvenanceMap,
+ pub align: Align,
+ pub mutability: Mutability,
+}
+
+impl Allocation {
+    /// Creates a new empty `Allocation` from the given `Align`.
+ fn new_empty_allocation(align: rustc_target::abi::Align) -> Allocation {
+ Allocation {
+ bytes: Vec::new(),
+ provenance: ProvenanceMap { ptrs: Vec::new() },
+ align: align.bytes(),
+ mutability: Mutability::Not,
+ }
+ }
+}
+
+// This is a free function rather than a `Stable` implementation because building the
+// allocation needs the `Ty` of the constant, which is only reachable through the
+// surrounding `ConstantKind`, and that is not available from inside a `Stable` impl.
+pub fn new_allocation<'tcx>(
+ const_kind: &rustc_middle::mir::ConstantKind<'tcx>,
+ const_value: ConstValue<'tcx>,
+ tables: &mut Tables<'tcx>,
+) -> Allocation {
+ match const_value {
+ ConstValue::Scalar(scalar) => {
+ let size = scalar.size();
+ let align = tables
+ .tcx
+ .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(const_kind.ty()))
+ .unwrap()
+ .align;
+ let mut allocation = rustc_middle::mir::interpret::Allocation::uninit(size, align.abi);
+ allocation
+ .write_scalar(&tables.tcx, alloc_range(rustc_target::abi::Size::ZERO, size), scalar)
+ .unwrap();
+ allocation.stable(tables)
+ }
+ ConstValue::ZeroSized => {
+ let align = tables
+ .tcx
+ .layout_of(rustc_middle::ty::ParamEnv::empty().and(const_kind.ty()))
+ .unwrap()
+ .align;
+ Allocation::new_empty_allocation(align.abi)
+ }
+ ConstValue::Slice { data, start, end } => {
+ let alloc_id = tables.tcx.create_memory_alloc(data);
+ let ptr = Pointer::new(alloc_id, rustc_target::abi::Size::from_bytes(start));
+ let scalar_ptr = rustc_middle::mir::interpret::Scalar::from_pointer(ptr, &tables.tcx);
+ let scalar_len = rustc_middle::mir::interpret::Scalar::from_target_usize(
+ (end - start) as u64,
+ &tables.tcx,
+ );
+ let layout = tables
+ .tcx
+ .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(const_kind.ty()))
+ .unwrap();
+ let mut allocation =
+ rustc_middle::mir::interpret::Allocation::uninit(layout.size, layout.align.abi);
+ allocation
+ .write_scalar(
+ &tables.tcx,
+ alloc_range(rustc_target::abi::Size::ZERO, tables.tcx.data_layout.pointer_size),
+ scalar_ptr,
+ )
+ .unwrap();
+ allocation
+ .write_scalar(
+ &tables.tcx,
+ alloc_range(tables.tcx.data_layout.pointer_size, scalar_len.size()),
+ scalar_len,
+ )
+ .unwrap();
+ allocation.stable(tables)
+ }
+ ConstValue::ByRef { alloc, offset } => {
+ let ty_size = tables
+ .tcx
+ .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(const_kind.ty()))
+ .unwrap()
+ .size;
+ allocation_filter(&alloc.0, alloc_range(offset, ty_size), tables)
+ }
+ }
+}
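
A standalone sketch (not part of the patch) of the `Slice` arm above, assuming a 64-bit little-endian target: the fat-pointer constant is materialized as one buffer holding the pointer scalar at offset 0 and the length scalar one pointer-size (8 bytes here) later.

    // Write a u64 scalar into the buffer at the given byte offset (little-endian).
    fn write_u64_le(buf: &mut [u8], offset: usize, value: u64) {
        buf[offset..offset + 8].copy_from_slice(&value.to_le_bytes());
    }

    fn main() {
        // Pretend the slice data lives at "address" 0x1000 and is 5 bytes long.
        let (ptr, len): (u64, u64) = (0x1000, 5);
        let mut alloc = vec![0u8; 16]; // room for the pointer plus the length
        write_u64_le(&mut alloc, 0, ptr); // pointer scalar at offset 0
        write_u64_le(&mut alloc, 8, len); // length scalar at offset pointer_size
        assert_eq!(&alloc[..8], &ptr.to_le_bytes()[..]);
        assert_eq!(&alloc[8..], &len.to_le_bytes()[..]);
    }
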
+
+/// Creates an `Allocation` only from information within the `AllocRange`.
+pub fn allocation_filter<'tcx>(
+ alloc: &rustc_middle::mir::interpret::Allocation,
+ alloc_range: AllocRange,
+ tables: &mut Tables<'tcx>,
+) -> Allocation {
+ let mut bytes: Vec<Option<u8>> = alloc
+ .inspect_with_uninit_and_ptr_outside_interpreter(
+ alloc_range.start.bytes_usize()..alloc_range.end().bytes_usize(),
+ )
+ .iter()
+ .copied()
+ .map(Some)
+ .collect();
+ for (i, b) in bytes.iter_mut().enumerate() {
+ if !alloc
+ .init_mask()
+ .get(rustc_target::abi::Size::from_bytes(i + alloc_range.start.bytes_usize()))
+ {
+ *b = None;
+ }
+ }
+ let mut ptrs = Vec::new();
+ for (offset, prov) in alloc
+ .provenance()
+ .ptrs()
+ .iter()
+ .filter(|a| a.0 >= alloc_range.start && a.0 <= alloc_range.end())
+ {
+ ptrs.push((offset.bytes_usize() - alloc_range.start.bytes_usize(), opaque(prov)));
+ }
+ Allocation {
+ bytes,
+ provenance: ProvenanceMap { ptrs },
+ align: alloc.align.bytes(),
+ mutability: alloc.mutability.stable(tables),
+ }
+}
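
A standalone sketch (not part of the patch) of the byte and provenance representation built above: uninitialized bytes become `None`, and provenance offsets are rebased to the start of the requested range. The `Prov` alias here is a placeholder for the opaque provenance type.

    // Placeholder for the opaque provenance type used above.
    type Prov = u32;

    struct FilteredAllocation {
        bytes: Vec<Option<u8>>,   // None marks an uninitialized byte
        ptrs: Vec<(usize, Prov)>, // provenance offsets, rebased to the range start
    }

    fn filter(
        bytes: &[u8],
        init: &[bool],
        ptrs: &[(usize, Prov)],
        start: usize,
        end: usize,
    ) -> FilteredAllocation {
        let bytes = bytes[start..end]
            .iter()
            .zip(&init[start..end])
            .map(|(b, initialized)| initialized.then_some(*b))
            .collect();
        let ptrs = ptrs
            .iter()
            .copied()
            .filter(|&(offset, _)| (start..=end).contains(&offset))
            .map(|(offset, prov)| (offset - start, prov))
            .collect();
        FilteredAllocation { bytes, ptrs }
    }

    fn main() {
        let alloc = filter(&[1, 2, 3, 4], &[true, false, true, true], &[(2, 7)], 1, 4);
        assert_eq!(alloc.bytes, vec![None, Some(3), Some(4)]);
        assert_eq!(alloc.ptrs, vec![(1, 7)]);
    }
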
+
+#[derive(Clone, Debug)]
+pub enum ConstantKind {
+ Allocated(Allocation),
+ Unevaluated(UnevaluatedConst),
+ ParamCt(Opaque),
+}
+
+#[derive(Clone, Debug)]
+pub struct UnevaluatedConst {
+ pub ty: Ty,
+ pub def: ConstDef,
+ pub args: GenericArgs,
+ pub promoted: Option<Promoted>,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum TraitSpecializationKind {
+ None,
+ Marker,
+ AlwaysApplicable,
+}
+
+#[derive(Clone, Debug)]
+pub struct TraitDecl {
+ pub def_id: TraitDef,
+ pub unsafety: Safety,
+ pub paren_sugar: bool,
+ pub has_auto_impl: bool,
+ pub is_marker: bool,
+ pub is_coinductive: bool,
+ pub skip_array_during_method_dispatch: bool,
+ pub specialization_kind: TraitSpecializationKind,
+ pub must_implement_one_of: Option<Vec<Ident>>,
+ pub implement_via_object: bool,
+ pub deny_explicit_impl: bool,
+}
+
+pub type ImplTrait = EarlyBinder<TraitRef>;
+
+#[derive(Clone, Debug)]
+pub struct TraitRef {
+ pub def_id: TraitDef,
+ pub args: GenericArgs,
+}
diff --git a/compiler/rustc_span/src/def_id.rs b/compiler/rustc_span/src/def_id.rs
index f65a6aa4f..595babc26 100644
--- a/compiler/rustc_span/src/def_id.rs
+++ b/compiler/rustc_span/src/def_id.rs
@@ -28,10 +28,16 @@ impl CrateNum {
CrateNum::from_usize(x)
}
+ // FIXME(typed_def_id): Replace this with `as_mod_def_id`.
#[inline]
pub fn as_def_id(self) -> DefId {
DefId { krate: self, index: CRATE_DEF_INDEX }
}
+
+ #[inline]
+ pub fn as_mod_def_id(self) -> ModDefId {
+ ModDefId::new_unchecked(DefId { krate: self, index: CRATE_DEF_INDEX })
+ }
}
impl fmt::Display for CrateNum {
@@ -485,3 +491,92 @@ impl<CTX: HashStableContext> ToStableHashKey<CTX> for CrateNum {
self.as_def_id().to_stable_hash_key(hcx)
}
}
+
+macro_rules! typed_def_id {
+ ($Name:ident, $LocalName:ident) => {
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Encodable, Decodable, HashStable_Generic)]
+ pub struct $Name(DefId);
+
+ impl $Name {
+ pub const fn new_unchecked(def_id: DefId) -> Self {
+ Self(def_id)
+ }
+
+ pub fn to_def_id(self) -> DefId {
+ self.into()
+ }
+
+ pub fn is_local(self) -> bool {
+ self.0.is_local()
+ }
+
+ pub fn as_local(self) -> Option<$LocalName> {
+ self.0.as_local().map($LocalName::new_unchecked)
+ }
+ }
+
+ impl From<$LocalName> for $Name {
+ fn from(local: $LocalName) -> Self {
+ Self(local.0.to_def_id())
+ }
+ }
+
+ impl From<$Name> for DefId {
+ fn from(typed: $Name) -> Self {
+ typed.0
+ }
+ }
+
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Encodable, Decodable, HashStable_Generic)]
+ pub struct $LocalName(LocalDefId);
+
+ impl !Ord for $LocalName {}
+ impl !PartialOrd for $LocalName {}
+
+ impl $LocalName {
+ pub const fn new_unchecked(def_id: LocalDefId) -> Self {
+ Self(def_id)
+ }
+
+ pub fn to_def_id(self) -> DefId {
+ self.0.into()
+ }
+
+ pub fn to_local_def_id(self) -> LocalDefId {
+ self.0
+ }
+ }
+
+ impl From<$LocalName> for LocalDefId {
+ fn from(typed: $LocalName) -> Self {
+ typed.0
+ }
+ }
+
+ impl From<$LocalName> for DefId {
+ fn from(typed: $LocalName) -> Self {
+ typed.0.into()
+ }
+ }
+ };
+}
+
+// N.B.: when adding new typed `DefId`s, update the corresponding trait impls in
+// `rustc_middle::dep_graph::def_node` for `DepNodeParams`.
+typed_def_id! { ModDefId, LocalModDefId }
+
+impl LocalModDefId {
+ pub const CRATE_DEF_ID: Self = Self::new_unchecked(CRATE_DEF_ID);
+}
+
+impl ModDefId {
+ pub fn is_top_level_module(self) -> bool {
+ self.0.is_top_level_module()
+ }
+}
+
+impl LocalModDefId {
+ pub fn is_top_level_module(self) -> bool {
+ self.0.is_top_level_module()
+ }
+}
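
A minimal standalone sketch (not part of the patch) of the typed-wrapper idea behind `typed_def_id!`: the newtype only converts to the raw ID explicitly, so module IDs and arbitrary `DefId`s can no longer be confused at the type level. The names below are illustrative stand-ins.

    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    struct DefId(u32);

    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    struct ModDefId(DefId);

    impl ModDefId {
        const fn new_unchecked(def_id: DefId) -> Self {
            ModDefId(def_id)
        }
        fn to_def_id(self) -> DefId {
            self.0
        }
    }

    fn module_children(_module: ModDefId) { /* only accepts module IDs */ }

    fn main() {
        let raw = DefId(0);
        let module = ModDefId::new_unchecked(raw);
        module_children(module);  // ok
        // module_children(raw);  // rejected: expected `ModDefId`, found `DefId`
        let _back: DefId = module.to_def_id();
    }
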
diff --git a/compiler/rustc_span/src/edit_distance.rs b/compiler/rustc_span/src/edit_distance.rs
index 259f42386..96a118e59 100644
--- a/compiler/rustc_span/src/edit_distance.rs
+++ b/compiler/rustc_span/src/edit_distance.rs
@@ -238,8 +238,9 @@ fn find_best_match_for_name_impl(
}
fn find_match_by_sorted_words(iter_names: &[Symbol], lookup: &str) -> Option<Symbol> {
+ let lookup_sorted_by_words = sort_by_words(lookup);
iter_names.iter().fold(None, |result, candidate| {
- if sort_by_words(candidate.as_str()) == sort_by_words(lookup) {
+ if sort_by_words(candidate.as_str()) == lookup_sorted_by_words {
Some(*candidate)
} else {
result
@@ -247,9 +248,9 @@ fn find_match_by_sorted_words(iter_names: &[Symbol], lookup: &str) -> Option<Sym
})
}
-fn sort_by_words(name: &str) -> String {
+fn sort_by_words(name: &str) -> Vec<&str> {
let mut split_words: Vec<&str> = name.split('_').collect();
// We are sorting primitive &strs and can use unstable sort here.
split_words.sort_unstable();
- split_words.join("_")
+ split_words
}
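
A standalone sketch (not part of the patch) of the optimization above: the lookup name is split and sorted once outside the fold, and candidates are compared as sorted word vectors instead of re-joined strings.

    fn sort_by_words(name: &str) -> Vec<&str> {
        let mut words: Vec<&str> = name.split('_').collect();
        words.sort_unstable();
        words
    }

    fn main() {
        // The lookup is split and sorted once, then reused for every candidate.
        let lookup_sorted = sort_by_words("search_find_match");
        let candidates = ["find_match_search", "find_something_else"];
        let hit = candidates.iter().copied().find(|c| sort_by_words(c) == lookup_sorted);
        assert_eq!(hit, Some("find_match_search"));
    }
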
diff --git a/compiler/rustc_span/src/edition.rs b/compiler/rustc_span/src/edition.rs
index f16db69aa..608b8c24b 100644
--- a/compiler/rustc_span/src/edition.rs
+++ b/compiler/rustc_span/src/edition.rs
@@ -82,17 +82,17 @@ impl Edition {
}
/// Are we allowed to use features from the Rust 2018 edition?
- pub fn rust_2018(self) -> bool {
+ pub fn at_least_rust_2018(self) -> bool {
self >= Edition::Edition2018
}
/// Are we allowed to use features from the Rust 2021 edition?
- pub fn rust_2021(self) -> bool {
+ pub fn at_least_rust_2021(self) -> bool {
self >= Edition::Edition2021
}
/// Are we allowed to use features from the Rust 2024 edition?
- pub fn rust_2024(self) -> bool {
+ pub fn at_least_rust_2024(self) -> bool {
self >= Edition::Edition2024
}
}
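
A standalone sketch (not part of the patch) of why the renamed predicates read better: each is a monotone "at least this edition" check over an ordered enum, which the old `rust_2018`-style names obscured.

    #[derive(PartialEq, PartialOrd)]
    enum Edition {
        Edition2015,
        Edition2018,
        Edition2021,
        Edition2024,
    }

    impl Edition {
        fn at_least_rust_2021(&self) -> bool {
            *self >= Edition::Edition2021
        }
    }

    fn main() {
        assert!(Edition::Edition2024.at_least_rust_2021());
        assert!(!Edition::Edition2018.at_least_rust_2021());
        assert!(!Edition::Edition2015.at_least_rust_2021());
    }
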
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
index 3bb9c4920..c24b8d9ec 100644
--- a/compiler/rustc_span/src/lib.rs
+++ b/compiler/rustc_span/src/lib.rs
@@ -23,6 +23,7 @@
#![feature(round_char_boundary)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate rustc_macros;
@@ -605,7 +606,7 @@ impl Span {
// FIXME: If this span comes from a `derive` macro but it points at code the user wrote,
// the callsite span and the span will be pointing at different places. It also means that
// we can safely provide suggestions on this span.
- || (matches!(self.ctxt().outer_expn_data().kind, ExpnKind::Macro(MacroKind::Derive, _))
+ || (self.in_derive_expansion()
&& self.parent_callsite().map(|p| (p.lo(), p.hi())) != Some((self.lo(), self.hi())))
}
@@ -685,6 +686,12 @@ impl Span {
}
/// Walk down the expansion ancestors to find a span that's contained within `outer`.
+ ///
+ /// The span returned by this method may have a different [`SyntaxContext`] than `outer`.
+ /// If you need to extend the span, use [`find_ancestor_inside_same_ctxt`] instead,
+ /// because joining spans with different syntax contexts can create unexpected results.
+ ///
+ /// [`find_ancestor_inside_same_ctxt`]: Self::find_ancestor_inside_same_ctxt
pub fn find_ancestor_inside(mut self, outer: Span) -> Option<Span> {
while !outer.contains(self) {
self = self.parent_callsite()?;
@@ -692,11 +699,34 @@ impl Span {
Some(self)
}
- /// Like `find_ancestor_inside`, but specifically for when spans might not
- /// overlaps. Take care when using this, and prefer `find_ancestor_inside`
- /// when you know that the spans are nested (modulo macro expansion).
+ /// Walk down the expansion ancestors to find a span with the same [`SyntaxContext`] as
+ /// `other`.
+ ///
+ /// Like [`find_ancestor_inside_same_ctxt`], but specifically for when spans might not
+ /// overlap. Take care when using this, and prefer [`find_ancestor_inside`] or
+ /// [`find_ancestor_inside_same_ctxt`] when you know that the spans are nested (modulo
+ /// macro expansion).
+ ///
+ /// [`find_ancestor_inside`]: Self::find_ancestor_inside
+ /// [`find_ancestor_inside_same_ctxt`]: Self::find_ancestor_inside_same_ctxt
pub fn find_ancestor_in_same_ctxt(mut self, other: Span) -> Option<Span> {
- while !Span::eq_ctxt(self, other) {
+ while !self.eq_ctxt(other) {
+ self = self.parent_callsite()?;
+ }
+ Some(self)
+ }
+
+ /// Walk down the expansion ancestors to find a span that's contained within `outer` and
+ /// has the same [`SyntaxContext`] as `outer`.
+ ///
+ /// This method is the combination of [`find_ancestor_inside`] and
+ /// [`find_ancestor_in_same_ctxt`] and should be preferred when extending the returned span.
+ /// If you do not need to modify the span, use [`find_ancestor_inside`] instead.
+ ///
+ /// [`find_ancestor_inside`]: Self::find_ancestor_inside
+ /// [`find_ancestor_in_same_ctxt`]: Self::find_ancestor_in_same_ctxt
+ pub fn find_ancestor_inside_same_ctxt(mut self, outer: Span) -> Option<Span> {
+ while !outer.contains(self) || !self.eq_ctxt(outer) {
self = self.parent_callsite()?;
}
Some(self)
@@ -707,24 +737,28 @@ impl Span {
self.ctxt().edition()
}
+ /// Is this edition 2015?
#[inline]
pub fn is_rust_2015(self) -> bool {
self.edition().is_rust_2015()
}
+ /// Are we allowed to use features from the Rust 2018 edition?
#[inline]
- pub fn rust_2018(self) -> bool {
- self.edition().rust_2018()
+ pub fn at_least_rust_2018(self) -> bool {
+ self.edition().at_least_rust_2018()
}
+ /// Are we allowed to use features from the Rust 2021 edition?
#[inline]
- pub fn rust_2021(self) -> bool {
- self.edition().rust_2021()
+ pub fn at_least_rust_2021(self) -> bool {
+ self.edition().at_least_rust_2021()
}
+ /// Are we allowed to use features from the Rust 2024 edition?
#[inline]
- pub fn rust_2024(self) -> bool {
- self.edition().rust_2024()
+ pub fn at_least_rust_2024(self) -> bool {
+ self.edition().at_least_rust_2024()
}
/// Returns the source callee.
@@ -2159,7 +2193,8 @@ where
// If this is not an empty or invalid span, we want to hash the last
// position that belongs to it, as opposed to hashing the first
// position past it.
- let Some((file, line_lo, col_lo, line_hi, col_hi)) = ctx.span_data_to_lines_and_cols(&span) else {
+ let Some((file, line_lo, col_lo, line_hi, col_hi)) = ctx.span_data_to_lines_and_cols(&span)
+ else {
Hash::hash(&TAG_INVALID_SPAN, hasher);
return;
};
diff --git a/compiler/rustc_span/src/source_map.rs b/compiler/rustc_span/src/source_map.rs
index 86716da17..983b2ab04 100644
--- a/compiler/rustc_span/src/source_map.rs
+++ b/compiler/rustc_span/src/source_map.rs
@@ -973,24 +973,21 @@ impl SourceMap {
Span::new(BytePos(start_of_next_point), end_of_next_point, sp.ctxt(), None)
}
- /// Returns a new span to check next none-whitespace character or some specified expected character
- /// If `expect` is none, the first span of non-whitespace character is returned.
- /// If `expect` presented, the first span of the character `expect` is returned
- /// Otherwise, the span reached to limit is returned.
- pub fn span_look_ahead(&self, span: Span, expect: Option<&str>, limit: Option<usize>) -> Span {
+ /// Checks whether the span is followed by the specified `expect` string within the
+ /// given lookahead limit, and returns the span of the match if so.
+ pub fn span_look_ahead(&self, span: Span, expect: &str, limit: Option<usize>) -> Option<Span> {
let mut sp = span;
for _ in 0..limit.unwrap_or(100_usize) {
sp = self.next_point(sp);
if let Ok(ref snippet) = self.span_to_snippet(sp) {
- if expect.is_some_and(|es| snippet == es) {
- break;
+ if snippet == expect {
+ return Some(sp);
}
- if expect.is_none() && snippet.chars().any(|c| !c.is_whitespace()) {
+ if snippet.chars().any(|c| !c.is_whitespace()) {
break;
}
}
}
- sp
+ None
}
/// Finds the width of the character, either before or after the end of provided span,
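
A standalone sketch (not part of the patch) of the revised contract, over a plain string rather than a `SourceMap`: scan forward a bounded number of characters and return the position of the expected token, or `None` as soon as some other non-whitespace character is seen or the limit runs out.

    /// Walk forward up to `limit` characters from `start`; return the index of
    /// `expect` if it appears before any other non-whitespace character.
    fn look_ahead(src: &str, start: usize, expect: char, limit: usize) -> Option<usize> {
        for (i, c) in src[start..].char_indices().take(limit) {
            if c == expect {
                return Some(start + i);
            }
            if !c.is_whitespace() {
                break;
            }
        }
        None
    }

    fn main() {
        let src = "let x = 1 ;";
        assert_eq!(look_ahead(src, "let x = 1".len(), ';', 100), Some(10));
        assert_eq!(look_ahead(src, "let".len(), ';', 100), None); // 'x' comes first
    }
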
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 5c6d43e50..28a2dfebc 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -326,6 +326,7 @@ symbols! {
abi_efiapi,
abi_msp430_interrupt,
abi_ptx,
+ abi_riscv_interrupt,
abi_sysv64,
abi_thiscall,
abi_unadjusted,
@@ -372,6 +373,7 @@ symbols! {
arm_target_feature,
array,
arrays,
+ as_mut_ptr,
as_ptr,
as_ref,
as_str,
@@ -399,6 +401,7 @@ symbols! {
async_await,
async_closure,
async_fn_in_trait,
+ async_fn_track_caller,
atomic,
atomic_mod,
atomics,
@@ -442,6 +445,7 @@ symbols! {
bridge,
bswap,
builtin_syntax,
+ c,
c_str,
c_str_literals,
c_unwind,
@@ -465,6 +469,7 @@ symbols! {
cfg_hide,
cfg_overflow_checks,
cfg_panic,
+ cfg_relocation_model,
cfg_sanitize,
cfg_target_abi,
cfg_target_compact,
@@ -496,6 +501,7 @@ symbols! {
cold,
collapse_debuginfo,
column,
+ compare_bytes,
compare_exchange,
compare_exchange_weak,
compile_error,
@@ -539,6 +545,7 @@ symbols! {
const_panic_fmt,
const_param_ty,
const_precise_live_drops,
+ const_ptr_cast,
const_raw_ptr_deref,
const_raw_ptr_to_usize_cast,
const_refs_to_cell,
@@ -570,6 +577,7 @@ symbols! {
crate_type,
crate_visibility_modifier,
crt_dash_static: "crt-static",
+ csky_target_feature,
cstring_type,
ctlz,
ctlz_nonzero,
@@ -619,6 +627,7 @@ symbols! {
destruct,
destructuring_assignment,
diagnostic,
+ diagnostic_namespace,
direct,
discriminant_kind,
discriminant_type,
@@ -654,6 +663,7 @@ symbols! {
dyn_metadata,
dyn_star,
dyn_trait,
+ dynamic_no_pic: "dynamic-no-pic",
e,
edition_panic,
effects,
@@ -781,6 +791,7 @@ symbols! {
generic_associated_types,
generic_associated_types_extended,
generic_const_exprs,
+ generic_const_items,
generic_param_attrs,
get_context,
global_allocator,
@@ -858,6 +869,7 @@ symbols! {
item,
item_like_imports,
iter,
+ iter_mut,
iter_repeat,
iterator_collect_fn,
kcfi,
@@ -1106,6 +1118,8 @@ symbols! {
path,
pattern_parentheses,
phantom_data,
+ pic,
+ pie,
pin,
platform_intrinsics,
plugin,
@@ -1151,9 +1165,14 @@ symbols! {
profiler_builtins,
profiler_runtime,
ptr,
+ ptr_cast,
+ ptr_cast_const,
ptr_cast_mut,
+ ptr_const_is_null,
+ ptr_from_mut,
ptr_from_ref,
ptr_guaranteed_cmp,
+ ptr_is_null,
ptr_mask,
ptr_null,
ptr_null_mut,
@@ -1209,6 +1228,7 @@ symbols! {
register_tool,
relaxed_adts,
relaxed_struct_unsize,
+ relocation_model,
rem,
rem_assign,
repr,
@@ -1229,6 +1249,8 @@ symbols! {
rintf64,
riscv_target_feature,
rlib,
+ ropi,
+ ropi_rwpi: "ropi-rwpi",
rotate_left,
rotate_right,
roundevenf32,
@@ -1263,6 +1285,7 @@ symbols! {
rustc_clean,
rustc_coherence_is_core,
rustc_coinductive,
+ rustc_confusables,
rustc_const_stable,
rustc_const_unstable,
rustc_conversion_suggestion,
@@ -1278,7 +1301,7 @@ symbols! {
rustc_dummy,
rustc_dump_env_program_clauses,
rustc_dump_program_clauses,
- rustc_dump_user_substs,
+ rustc_dump_user_args,
rustc_dump_vtable,
rustc_effective_visibility,
rustc_error,
@@ -1339,6 +1362,7 @@ symbols! {
rustdoc_missing_doc_code_examples,
rustfmt,
rvalue_static_promotion,
+ rwpi,
s,
safety,
sanitize,
@@ -1364,9 +1388,13 @@ symbols! {
simd_arith_offset,
simd_as,
simd_bitmask,
+ simd_bitreverse,
+ simd_bswap,
simd_cast,
simd_cast_ptr,
simd_ceil,
+ simd_ctlz,
+ simd_cttz,
simd_div,
simd_eq,
simd_expose_addr,
diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs
index ec7032cd3..3a3356808 100644
--- a/compiler/rustc_symbol_mangling/src/legacy.rs
+++ b/compiler/rustc_symbol_mangling/src/legacy.rs
@@ -2,8 +2,8 @@ use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher};
use rustc_hir::def_id::CrateNum;
use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
use rustc_middle::ty::print::{PrettyPrinter, Print, Printer};
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{GenericArg, GenericArgKind};
use rustc_middle::util::common::record_time;
use std::fmt::{self, Write};
@@ -26,7 +26,7 @@ pub(super) fn mangle<'tcx>(
let key = tcx.def_key(ty_def_id);
match key.disambiguated_data.data {
DefPathData::TypeNs(_) | DefPathData::ValueNs(_) => {
- instance_ty = tcx.type_of(ty_def_id).subst_identity();
+ instance_ty = tcx.type_of(ty_def_id).instantiate_identity();
debug!(?instance_ty);
break;
}
@@ -58,7 +58,7 @@ pub(super) fn mangle<'tcx>(
def_id,
if let ty::InstanceDef::DropGlue(_, _) = instance.def {
// Add the name of the dropped type to the symbol name
- &*instance.substs
+ &*instance.args
} else {
&[]
},
@@ -95,8 +95,8 @@ fn get_symbol_hash<'tcx>(
instantiating_crate: Option<CrateNum>,
) -> Hash64 {
let def_id = instance.def_id();
- let substs = instance.substs;
- debug!("get_symbol_hash(def_id={:?}, parameters={:?})", def_id, substs);
+ let args = instance.args;
+ debug!("get_symbol_hash(def_id={:?}, parameters={:?})", def_id, args);
tcx.with_stable_hashing_context(|mut hcx| {
let mut hasher = StableHasher::new();
@@ -122,7 +122,7 @@ fn get_symbol_hash<'tcx>(
}
// also include any type parameters (for generic items)
- substs.hash_stable(hcx, &mut hasher);
+ args.hash_stable(hcx, &mut hasher);
if let Some(instantiating_crate) = instantiating_crate {
tcx.def_path_hash(instantiating_crate.as_def_id())
@@ -219,10 +219,10 @@ impl<'tcx> Printer<'tcx> for &mut SymbolPrinter<'tcx> {
fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
match *ty.kind() {
// Print all nominal types as paths (unlike `pretty_print_type`).
- ty::FnDef(def_id, substs)
- | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, substs, .. })
- | ty::Closure(def_id, substs)
- | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
+ ty::FnDef(def_id, args)
+ | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. })
+ | ty::Closure(def_id, args)
+ | ty::Generator(def_id, args, _) => self.print_def_path(def_id, args),
// The `pretty_print_type` formatting of array size depends on
// -Zverbose flag, so we cannot reuse it here.
diff --git a/compiler/rustc_symbol_mangling/src/lib.rs b/compiler/rustc_symbol_mangling/src/lib.rs
index 692542da7..74538e9f5 100644
--- a/compiler/rustc_symbol_mangling/src/lib.rs
+++ b/compiler/rustc_symbol_mangling/src/lib.rs
@@ -108,7 +108,7 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
use rustc_middle::query::Providers;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Instance, TyCtxt};
use rustc_session::config::SymbolManglingVersion;
@@ -144,7 +144,7 @@ fn symbol_name_provider<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty
// This closure determines the instantiating crate for instances that
// need an instantiating-crate-suffix for their symbol name, in order
// to differentiate between local copies.
- if is_generic(instance.substs) {
+ if is_generic(instance.args) {
// For generics we might find re-usable upstream instances. If there
// is one, we rely on the symbol being instantiated locally.
instance.upstream_monomorphization(tcx).unwrap_or(LOCAL_CRATE)
@@ -174,13 +174,13 @@ fn compute_symbol_name<'tcx>(
compute_instantiating_crate: impl FnOnce() -> CrateNum,
) -> String {
let def_id = instance.def_id();
- let substs = instance.substs;
+ let args = instance.args;
- debug!("symbol_name(def_id={:?}, substs={:?})", def_id, substs);
+ debug!("symbol_name(def_id={:?}, args={:?})", def_id, args);
if let Some(def_id) = def_id.as_local() {
if tcx.proc_macro_decls_static(()) == Some(def_id) {
- let stable_crate_id = tcx.sess.local_stable_crate_id();
+ let stable_crate_id = tcx.stable_crate_id(LOCAL_CRATE);
return tcx.sess.generate_proc_macro_decls_symbol(stable_crate_id);
}
}
@@ -246,7 +246,7 @@ fn compute_symbol_name<'tcx>(
// the ID of the instantiating crate. This avoids symbol conflicts
// in case the same instances is emitted in two crates of the same
// project.
- let avoid_cross_crate_conflicts = is_generic(substs) || is_globally_shared_function;
+ let avoid_cross_crate_conflicts = is_generic(args) || is_globally_shared_function;
let instantiating_crate = avoid_cross_crate_conflicts.then(compute_instantiating_crate);
@@ -278,6 +278,6 @@ fn compute_symbol_name<'tcx>(
symbol
}
-fn is_generic(substs: SubstsRef<'_>) -> bool {
- substs.non_erasable_generics().next().is_some()
+fn is_generic(args: GenericArgsRef<'_>) -> bool {
+ args.non_erasable_generics().next().is_some()
}
diff --git a/compiler/rustc_symbol_mangling/src/test.rs b/compiler/rustc_symbol_mangling/src/test.rs
index 985b22107..eddfd0df3 100644
--- a/compiler/rustc_symbol_mangling/src/test.rs
+++ b/compiler/rustc_symbol_mangling/src/test.rs
@@ -7,7 +7,7 @@
use crate::errors::{Kind, TestOutput};
use rustc_hir::def_id::LocalDefId;
use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_middle::ty::{subst::InternalSubsts, Instance, TyCtxt};
+use rustc_middle::ty::{GenericArgs, Instance, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
const SYMBOL_NAME: Symbol = sym::rustc_symbol_name;
@@ -57,7 +57,7 @@ impl SymbolNamesTest<'_> {
let def_id = def_id.to_def_id();
let instance = Instance::new(
def_id,
- tcx.erase_regions(InternalSubsts::identity_for_item(tcx, def_id)),
+ tcx.erase_regions(GenericArgs::identity_for_item(tcx, def_id)),
);
let mangled = tcx.symbol_name(instance);
tcx.sess.emit_err(TestOutput {
diff --git a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
index 3b46275ec..d345368d5 100644
--- a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
+++ b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
@@ -7,18 +7,19 @@
///
/// For more information about LLVM CFI and cross-language LLVM CFI support for the Rust compiler,
/// see design document in the tracking issue #89653.
-use core::fmt::Display;
use rustc_data_structures::base_n;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
+use rustc_middle::ty::layout::IntegerExt;
use rustc_middle::ty::{
self, Const, ExistentialPredicate, FloatTy, FnSig, Instance, IntTy, List, Region, RegionKind,
TermKind, Ty, TyCtxt, UintTy,
};
+use rustc_middle::ty::{GenericArg, GenericArgKind, GenericArgsRef};
use rustc_span::def_id::DefId;
use rustc_span::sym;
use rustc_target::abi::call::{Conv, FnAbi};
+use rustc_target::abi::Integer;
use rustc_target::spec::abi::Abi;
use std::fmt::Write as _;
@@ -93,44 +94,54 @@ fn encode_const<'tcx>(
dict: &mut FxHashMap<DictKey<'tcx>, usize>,
options: EncodeTyOptions,
) -> String {
- // L<element-type>[n]<element-value>E as literal argument
+ // L<element-type>[n][<element-value>]E as literal argument
let mut s = String::from('L');
- // Element type
- s.push_str(&encode_ty(tcx, c.ty(), dict, options));
+ match c.kind() {
+ // Const parameters
+ ty::ConstKind::Param(..) => {
+ // L<element-type>E as literal argument
- // The only allowed types of const parameters are bool, u8, u16, u32, u64, u128, usize i8, i16,
- // i32, i64, i128, isize, and char. The bool value false is encoded as 0 and true as 1.
- fn push_signed_value<T: Display + PartialOrd>(s: &mut String, value: T, zero: T) {
- if value < zero {
- s.push('n')
- };
- let _ = write!(s, "{value}");
- }
-
- fn push_unsigned_value<T: Display>(s: &mut String, value: T) {
- let _ = write!(s, "{value}");
- }
+ // Element type
+ s.push_str(&encode_ty(tcx, c.ty(), dict, options));
+ }
- if let Some(scalar_int) = c.try_to_scalar_int() {
- let signed = c.ty().is_signed();
- match scalar_int.size().bits() {
- 8 if signed => push_signed_value(&mut s, scalar_int.try_to_i8().unwrap(), 0),
- 16 if signed => push_signed_value(&mut s, scalar_int.try_to_i16().unwrap(), 0),
- 32 if signed => push_signed_value(&mut s, scalar_int.try_to_i32().unwrap(), 0),
- 64 if signed => push_signed_value(&mut s, scalar_int.try_to_i64().unwrap(), 0),
- 128 if signed => push_signed_value(&mut s, scalar_int.try_to_i128().unwrap(), 0),
- 8 => push_unsigned_value(&mut s, scalar_int.try_to_u8().unwrap()),
- 16 => push_unsigned_value(&mut s, scalar_int.try_to_u16().unwrap()),
- 32 => push_unsigned_value(&mut s, scalar_int.try_to_u32().unwrap()),
- 64 => push_unsigned_value(&mut s, scalar_int.try_to_u64().unwrap()),
- 128 => push_unsigned_value(&mut s, scalar_int.try_to_u128().unwrap()),
- _ => {
- bug!("encode_const: unexpected size `{:?}`", scalar_int.size().bits());
+ // Literal arguments
+ ty::ConstKind::Value(..) => {
+ // L<element-type>[n]<element-value>E as literal argument
+
+ // Element type
+ s.push_str(&encode_ty(tcx, c.ty(), dict, options));
+
+ // The only allowed types of const values are bool, u8, u16, u32,
+ // u64, u128, usize, i8, i16, i32, i64, i128, isize, and char. The
+ // bool value false is encoded as 0 and true as 1.
+ match c.ty().kind() {
+ ty::Int(ity) => {
+ let bits = c.eval_bits(tcx, ty::ParamEnv::reveal_all(), c.ty());
+ let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128;
+ if val < 0 {
+ s.push('n');
+ }
+ let _ = write!(s, "{val}");
+ }
+ ty::Uint(_) => {
+ let val = c.eval_bits(tcx, ty::ParamEnv::reveal_all(), c.ty());
+ let _ = write!(s, "{val}");
+ }
+ ty::Bool => {
+ let val = c.try_eval_bool(tcx, ty::ParamEnv::reveal_all()).unwrap();
+ let _ = write!(s, "{val}");
+ }
+ _ => {
+ bug!("encode_const: unexpected type `{:?}`", c.ty());
+ }
}
- };
- } else {
- bug!("encode_const: unexpected type `{:?}`", c.ty());
+ }
+
+ _ => {
+ bug!("encode_const: unexpected kind `{:?}`", c.kind());
+ }
}
// Close the "L..E" pair
@@ -212,12 +223,12 @@ fn encode_predicate<'tcx>(
ty::ExistentialPredicate::Trait(trait_ref) => {
let name = encode_ty_name(tcx, trait_ref.def_id);
let _ = write!(s, "u{}{}", name.len(), &name);
- s.push_str(&encode_substs(tcx, trait_ref.substs, dict, options));
+ s.push_str(&encode_args(tcx, trait_ref.args, dict, options));
}
ty::ExistentialPredicate::Projection(projection) => {
let name = encode_ty_name(tcx, projection.def_id);
let _ = write!(s, "u{}{}", name.len(), &name);
- s.push_str(&encode_substs(tcx, projection.substs, dict, options));
+ s.push_str(&encode_args(tcx, projection.args, dict, options));
match projection.term.unpack() {
TermKind::Ty(ty) => s.push_str(&encode_ty(tcx, ty, dict, options)),
TermKind::Const(c) => s.push_str(&encode_const(tcx, c, dict, options)),
@@ -286,21 +297,21 @@ fn encode_region<'tcx>(
s
}
-/// Encodes substs using the Itanium C++ ABI with vendor extended type qualifiers and types for Rust
+/// Encodes args using the Itanium C++ ABI with vendor extended type qualifiers and types for Rust
/// types that are not used at the FFI boundary.
-fn encode_substs<'tcx>(
+fn encode_args<'tcx>(
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
dict: &mut FxHashMap<DictKey<'tcx>, usize>,
options: EncodeTyOptions,
) -> String {
// [I<subst1..substN>E] as part of vendor extended type
let mut s = String::new();
- let substs: Vec<GenericArg<'_>> = substs.iter().collect();
- if !substs.is_empty() {
+ let args: Vec<GenericArg<'_>> = args.iter().collect();
+ if !args.is_empty() {
s.push('I');
- for subst in substs {
- match subst.unpack() {
+ for arg in args {
+ match arg.unpack() {
GenericArgKind::Lifetime(region) => {
s.push_str(&encode_region(tcx, region, dict, options));
}
@@ -400,7 +411,7 @@ fn encode_ty_name(tcx: TyCtxt<'_>, def_id: DefId) -> String {
let _ = write!(s, "{}", name.len());
// Prepend a '_' if name starts with a digit or '_'
- if let Some(first) = name.as_bytes().get(0) {
+ if let Some(first) = name.as_bytes().first() {
if first.is_ascii_digit() || *first == b'_' {
s.push('_');
}
@@ -524,7 +535,7 @@ fn encode_ty<'tcx>(
}
// User-defined types
- ty::Adt(adt_def, substs) => {
+ ty::Adt(adt_def, args) => {
let mut s = String::new();
let def_id = adt_def.did();
if let Some(cfi_encoding) = tcx.get_attr(def_id, sym::cfi_encoding) {
@@ -570,7 +581,7 @@ fn encode_ty<'tcx>(
// <subst>, as vendor extended type.
let name = encode_ty_name(tcx, def_id);
let _ = write!(s, "u{}{}", name.len(), &name);
- s.push_str(&encode_substs(tcx, substs, dict, options));
+ s.push_str(&encode_args(tcx, args, dict, options));
compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
}
typeid.push_str(&s);
@@ -608,27 +619,27 @@ fn encode_ty<'tcx>(
}
// Function types
- ty::FnDef(def_id, substs) | ty::Closure(def_id, substs) => {
+ ty::FnDef(def_id, args) | ty::Closure(def_id, args) => {
// u<length><name>[I<element-type1..element-typeN>E], where <element-type> is <subst>,
// as vendor extended type.
let mut s = String::new();
let name = encode_ty_name(tcx, *def_id);
let _ = write!(s, "u{}{}", name.len(), &name);
- s.push_str(&encode_substs(tcx, substs, dict, options));
+ s.push_str(&encode_args(tcx, args, dict, options));
compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
typeid.push_str(&s);
}
- ty::Generator(def_id, substs, ..) => {
+ ty::Generator(def_id, args, ..) => {
// u<length><name>[I<element-type1..element-typeN>E], where <element-type> is <subst>,
// as vendor extended type.
let mut s = String::new();
let name = encode_ty_name(tcx, *def_id);
let _ = write!(s, "u{}{}", name.len(), &name);
- // Encode parent substs only
- s.push_str(&encode_substs(
+ // Encode parent args only
+ s.push_str(&encode_args(
tcx,
- tcx.mk_substs(substs.as_generator().parent_substs()),
+ tcx.mk_args(args.as_generator().parent_args()),
dict,
options,
));
@@ -732,18 +743,18 @@ fn transform_predicates<'tcx>(
tcx.mk_poly_existential_predicates(&predicates)
}
-/// Transforms substs for being encoded and used in the substitution dictionary.
-fn transform_substs<'tcx>(
+/// Transforms args for being encoded and used in the substitution dictionary.
+fn transform_args<'tcx>(
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
options: TransformTyOptions,
-) -> SubstsRef<'tcx> {
- let substs = substs.iter().map(|subst| match subst.unpack() {
+) -> GenericArgsRef<'tcx> {
+ let args = args.iter().map(|arg| match arg.unpack() {
GenericArgKind::Type(ty) if ty.is_c_void(tcx) => Ty::new_unit(tcx).into(),
GenericArgKind::Type(ty) => transform_ty(tcx, ty, options).into(),
- _ => subst,
+ _ => arg,
});
- tcx.mk_substs_from_iter(substs)
+ tcx.mk_args_from_iter(args)
}
// Transforms a ty:Ty for being encoded and used in the substitution dictionary. It transforms all
@@ -813,11 +824,8 @@ fn transform_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, options: TransformTyOptio
}
ty::Array(ty0, len) => {
- let len = len
- .try_to_scalar()
- .unwrap()
- .to_u64()
- .unwrap_or_else(|_| panic!("failed to convert length to u64"));
+ let len = len.eval_target_usize(tcx, ty::ParamEnv::reveal_all());
+
ty = Ty::new_array(tcx, transform_ty(tcx, *ty0, options), len);
}
@@ -825,7 +833,7 @@ fn transform_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, options: TransformTyOptio
ty = Ty::new_slice(tcx, transform_ty(tcx, *ty0, options));
}
- ty::Adt(adt_def, substs) => {
+ ty::Adt(adt_def, args) => {
if ty.is_c_void(tcx) {
ty = Ty::new_unit(tcx);
} else if options.contains(TransformTyOptions::GENERALIZE_REPR_C) && adt_def.repr().c()
@@ -840,13 +848,13 @@ fn transform_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, options: TransformTyOptio
let variant = adt_def.non_enum_variant();
let param_env = tcx.param_env(variant.def_id);
let field = variant.fields.iter().find(|field| {
- let ty = tcx.type_of(field.did).subst_identity();
+ let ty = tcx.type_of(field.did).instantiate_identity();
let is_zst =
tcx.layout_of(param_env.and(ty)).is_ok_and(|layout| layout.is_zst());
!is_zst
});
if let Some(field) = field {
- let ty0 = tcx.type_of(field.did).subst(tcx, substs);
+ let ty0 = tcx.type_of(field.did).instantiate(tcx, args);
// Generalize any repr(transparent) user-defined type that is either a pointer
// or reference, and either references itself or any other type that contains or
// references itself, to avoid a reference cycle.
@@ -864,25 +872,20 @@ fn transform_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, options: TransformTyOptio
ty = Ty::new_unit(tcx);
}
} else {
- ty = Ty::new_adt(tcx, *adt_def, transform_substs(tcx, substs, options));
+ ty = Ty::new_adt(tcx, *adt_def, transform_args(tcx, args, options));
}
}
- ty::FnDef(def_id, substs) => {
- ty = Ty::new_fn_def(tcx, *def_id, transform_substs(tcx, substs, options));
+ ty::FnDef(def_id, args) => {
+ ty = Ty::new_fn_def(tcx, *def_id, transform_args(tcx, args, options));
}
- ty::Closure(def_id, substs) => {
- ty = Ty::new_closure(tcx, *def_id, transform_substs(tcx, substs, options));
+ ty::Closure(def_id, args) => {
+ ty = Ty::new_closure(tcx, *def_id, transform_args(tcx, args, options));
}
- ty::Generator(def_id, substs, movability) => {
- ty = Ty::new_generator(
- tcx,
- *def_id,
- transform_substs(tcx, substs, options),
- *movability,
- );
+ ty::Generator(def_id, args, movability) => {
+ ty = Ty::new_generator(tcx, *def_id, transform_args(tcx, args, options), *movability);
}
ty::Ref(region, ty0, ..) => {
diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs
index 5e5cc6e4e..da19a3ba4 100644
--- a/compiler/rustc_symbol_mangling/src/v0.rs
+++ b/compiler/rustc_symbol_mangling/src/v0.rs
@@ -27,7 +27,7 @@ pub(super) fn mangle<'tcx>(
) -> String {
let def_id = instance.def_id();
// FIXME(eddyb) this should ideally not be needed.
- let substs = tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), instance.substs);
+ let args = tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), instance.args);
let prefix = "_R";
let mut cx = &mut SymbolMangler {
@@ -50,9 +50,9 @@ pub(super) fn mangle<'tcx>(
};
cx = if let Some(shim_kind) = shim_kind {
- cx.path_append_ns(|cx| cx.print_def_path(def_id, substs), 'S', 0, shim_kind).unwrap()
+ cx.path_append_ns(|cx| cx.print_def_path(def_id, args), 'S', 0, shim_kind).unwrap()
} else {
- cx.print_def_path(def_id, substs).unwrap()
+ cx.print_def_path(def_id, args).unwrap()
};
if let Some(instantiating_crate) = instantiating_crate {
cx = cx.print_def_path(instantiating_crate.as_def_id(), &[]).unwrap();
@@ -245,19 +245,19 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
fn print_def_path(
mut self,
def_id: DefId,
- substs: &'tcx [GenericArg<'tcx>],
+ args: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
- if let Some(&i) = self.paths.get(&(def_id, substs)) {
+ if let Some(&i) = self.paths.get(&(def_id, args)) {
return self.print_backref(i);
}
let start = self.out.len();
- self = self.default_print_def_path(def_id, substs)?;
+ self = self.default_print_def_path(def_id, args)?;
// Only cache paths that do not refer to an enclosing
// binder (which would change depending on context).
- if !substs.iter().any(|k| k.has_escaping_bound_vars()) {
- self.paths.insert((def_id, substs), start);
+ if !args.iter().any(|k| k.has_escaping_bound_vars()) {
+ self.paths.insert((def_id, args), start);
}
Ok(self)
}
@@ -265,7 +265,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
fn print_impl_path(
mut self,
impl_def_id: DefId,
- substs: &'tcx [GenericArg<'tcx>],
+ args: &'tcx [GenericArg<'tcx>],
mut self_ty: Ty<'tcx>,
mut impl_trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
@@ -273,8 +273,8 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
let mut param_env = self.tcx.param_env_reveal_all_normalized(impl_def_id);
- if !substs.is_empty() {
- param_env = EarlyBinder::bind(param_env).subst(self.tcx, substs);
+ if !args.is_empty() {
+ param_env = EarlyBinder::bind(param_env).instantiate(self.tcx, args);
}
match &mut impl_trait_ref {
@@ -295,7 +295,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
// Encode impl generic params if the substitutions contain parameters (implying
// polymorphization is enabled) and this isn't an inherent impl.
- if impl_trait_ref.is_some() && substs.iter().any(|a| a.has_non_region_param()) {
+ if impl_trait_ref.is_some() && args.iter().any(|a| a.has_non_region_param()) {
self = self.path_generic_args(
|this| {
this.path_append_ns(
@@ -305,7 +305,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
"",
)
},
- substs,
+ args,
)?;
} else {
self.push_disambiguator(key.disambiguated_data.disambiguator as u64);
@@ -315,7 +315,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
self = self_ty.print(self)?;
if let Some(trait_ref) = impl_trait_ref {
- self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
+ self = self.print_def_path(trait_ref.def_id, trait_ref.args)?;
}
Ok(self)
@@ -431,12 +431,12 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
}
// Mangle all nominal types as paths.
- ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
- | ty::FnDef(def_id, substs)
- | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, substs, .. })
- | ty::Closure(def_id, substs)
- | ty::Generator(def_id, substs, _) => {
- self = self.print_def_path(def_id, substs)?;
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), args)
+ | ty::FnDef(def_id, args)
+ | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. })
+ | ty::Closure(def_id, args)
+ | ty::Generator(def_id, args, _) => {
+ self = self.print_def_path(def_id, args)?;
}
ty::Foreign(def_id) => {
self = self.print_def_path(def_id, &[])?;
@@ -537,7 +537,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
// Use a type that can't appear in defaults of type parameters.
let dummy_self = Ty::new_fresh(cx.tcx, 0);
let trait_ref = trait_ref.with_self_ty(cx.tcx, dummy_self);
- cx = cx.print_def_path(trait_ref.def_id, trait_ref.substs)?;
+ cx = cx.print_def_path(trait_ref.def_id, trait_ref.args)?;
}
ty::ExistentialPredicate::Projection(projection) => {
let name = cx.tcx.associated_item(projection.def_id).name;
@@ -628,7 +628,8 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
valtree, ty
)
});
- let s = std::str::from_utf8(slice).expect("non utf8 str from miri");
+ let s = std::str::from_utf8(slice)
+ .expect("non utf8 str from MIR interpreter");
self.push("e");
@@ -679,13 +680,13 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
self.push("T");
self = print_field_list(self)?;
}
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
let variant_idx =
contents.variant.expect("destructed const of adt without variant idx");
let variant_def = &def.variant(variant_idx);
self.push("V");
- self = self.print_def_path(variant_def.def_id, substs)?;
+ self = self.print_def_path(variant_def.def_id, args)?;
match variant_def.ctor_kind() {
Some(CtorKind::Const) => {
@@ -750,7 +751,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
self.push("Y");
self = self_ty.print(self)?;
- self.print_def_path(trait_ref.def_id, trait_ref.substs)
+ self.print_def_path(trait_ref.def_id, trait_ref.args)
}
fn path_append_impl(
diff --git a/compiler/rustc_target/Cargo.toml b/compiler/rustc_target/Cargo.toml
index a71e2e8cc..393e59e8b 100644
--- a/compiler/rustc_target/Cargo.toml
+++ b/compiler/rustc_target/Cargo.toml
@@ -16,6 +16,6 @@ rustc_serialize = { path = "../rustc_serialize" }
rustc_span = { path = "../rustc_span" }
[dependencies.object]
-version = "0.31.1"
+version = "0.32.0"
default-features = false
features = ["elf"]
diff --git a/compiler/rustc_target/src/abi/call/aarch64.rs b/compiler/rustc_target/src/abi/call/aarch64.rs
index a84988fa7..b4c7b0f12 100644
--- a/compiler/rustc_target/src/abi/call/aarch64.rs
+++ b/compiler/rustc_target/src/abi/call/aarch64.rs
@@ -1,25 +1,15 @@
use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
use crate::abi::{HasDataLayout, TyAbiInterface};
-/// Given integer-types M and register width N (e.g. M=u16 and N=32 bits), the
-/// `ParamExtension` policy specifies how a uM value should be treated when
-/// passed via register or stack-slot of width N. See also rust-lang/rust#97463.
+/// Indicates the variant of the AArch64 ABI we are compiling for.
+/// Used to accommodate Apple and Microsoft's deviations from the usual AAPCS ABI.
+///
+/// Corresponds to Clang's `AArch64ABIInfo::ABIKind`.
#[derive(Copy, Clone, PartialEq)]
-pub enum ParamExtension {
- /// Indicates that when passing an i8/i16, either as a function argument or
- /// as a return value, it must be sign-extended to 32 bits, and likewise a
- /// u8/u16 must be zero-extended to 32-bits. (This variant is here to
- /// accommodate Apple's deviation from the usual AArch64 ABI as defined by
- /// ARM.)
- ///
- /// See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
- ExtendTo32Bits,
-
- /// Indicates that no sign- nor zero-extension is performed: if a value of
- /// type with bitwidth M is passed as function argument or return value,
- /// then M bits are copied into the least significant M bits, and the
- /// remaining bits of the register (or word of memory) are untouched.
- NoExtension,
+pub enum AbiKind {
+ AAPCS,
+ DarwinPCS,
+ Win64,
}
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
@@ -45,15 +35,17 @@ where
})
}
-fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, param_policy: ParamExtension)
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if !ret.layout.is_aggregate() {
- match param_policy {
- ParamExtension::ExtendTo32Bits => ret.extend_integer_width_to(32),
- ParamExtension::NoExtension => {}
+ if kind == AbiKind::DarwinPCS {
+ // On Darwin, when returning an i8/i16, it must be sign-extended to 32 bits,
+ // and likewise a u8/u16 must be zero-extended to 32 bits.
+ // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
+ ret.extend_integer_width_to(32)
}
return;
}
@@ -70,15 +62,17 @@ where
ret.make_indirect();
}
-fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, param_policy: ParamExtension)
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if !arg.layout.is_aggregate() {
- match param_policy {
- ParamExtension::ExtendTo32Bits => arg.extend_integer_width_to(32),
- ParamExtension::NoExtension => {}
+ if kind == AbiKind::DarwinPCS {
+ // On Darwin, when passing an i8/i16, it must be sign-extended to 32 bits,
+ // and likewise a u8/u16 must be zero-extended to 32 bits.
+ // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
+ arg.extend_integer_width_to(32);
}
return;
}
@@ -87,27 +81,39 @@ where
return;
}
let size = arg.layout.size;
- let bits = size.bits();
- if bits <= 128 {
- arg.cast_to(Uniform { unit: Reg::i64(), total: size });
+ let align = if kind == AbiKind::AAPCS {
+ // When passing small aggregates by value, the AAPCS ABI mandates using the unadjusted
+ // alignment of the type (not including `repr(align)`).
+ // This matches behavior of `AArch64ABIInfo::classifyArgumentType` in Clang.
+ // See: <https://github.com/llvm/llvm-project/blob/5e691a1c9b0ad22689d4a434ddf4fed940e58dec/clang/lib/CodeGen/TargetInfo.cpp#L5816-L5823>
+ arg.layout.unadjusted_abi_align
+ } else {
+ arg.layout.align.abi
+ };
+ if size.bits() <= 128 {
+ if align.bits() == 128 {
+ arg.cast_to(Uniform { unit: Reg::i128(), total: size });
+ } else {
+ arg.cast_to(Uniform { unit: Reg::i64(), total: size });
+ }
return;
}
arg.make_indirect();
}
-pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, param_policy: ParamExtension)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, kind: AbiKind)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if !fn_abi.ret.is_ignore() {
- classify_ret(cx, &mut fn_abi.ret, param_policy);
+ classify_ret(cx, &mut fn_abi.ret, kind);
}
for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
- classify_arg(cx, arg, param_policy);
+ classify_arg(cx, arg, kind);
}
}
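
A standalone sketch (not part of the patch) of how a caller selects the new `AbiKind`, mirroring the target checks used by `compute_abi_info`'s caller later in this series; the `TargetFlags` struct is a stand-in for the real target spec.

    #[derive(Copy, Clone, PartialEq, Debug)]
    enum AbiKind {
        AAPCS,
        DarwinPCS,
        Win64,
    }

    struct TargetFlags {
        is_like_osx: bool,
        is_like_windows: bool,
    }

    fn aarch64_abi_kind(t: &TargetFlags) -> AbiKind {
        if t.is_like_osx {
            AbiKind::DarwinPCS
        } else if t.is_like_windows {
            AbiKind::Win64
        } else {
            AbiKind::AAPCS
        }
    }

    fn main() {
        let linux = TargetFlags { is_like_osx: false, is_like_windows: false };
        assert_eq!(aarch64_abi_kind(&linux), AbiKind::AAPCS);
        let macos = TargetFlags { is_like_osx: true, is_like_windows: false };
        assert_eq!(aarch64_abi_kind(&macos), AbiKind::DarwinPCS);
    }
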
diff --git a/compiler/rustc_target/src/abi/call/csky.rs b/compiler/rustc_target/src/abi/call/csky.rs
new file mode 100644
index 000000000..bbe95fa20
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/csky.rs
@@ -0,0 +1,31 @@
+// See https://github.com/llvm/llvm-project/blob/d85b94bf0080dcd780656c0f5e6342800720eba9/llvm/lib/Target/CSKY/CSKYCallingConv.td
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() || ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() || arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in fn_abi.args.iter_mut() {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/m68k.rs b/compiler/rustc_target/src/abi/call/m68k.rs
index c1e0f54af..1d4649ed8 100644
--- a/compiler/rustc_target/src/abi/call/m68k.rs
+++ b/compiler/rustc_target/src/abi/call/m68k.rs
@@ -10,7 +10,7 @@ fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
if arg.layout.is_aggregate() {
- arg.make_indirect_byval();
+ arg.make_indirect_byval(None);
} else {
arg.extend_integer_width_to(32);
}
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index c4abf6f4b..8fab13d5d 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -9,6 +9,7 @@ mod amdgpu;
mod arm;
mod avr;
mod bpf;
+mod csky;
mod hexagon;
mod loongarch;
mod m68k;
@@ -494,9 +495,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
.set(ArgAttribute::NonNull)
.set(ArgAttribute::NoUndef);
attrs.pointee_size = layout.size;
- // FIXME(eddyb) We should be doing this, but at least on
- // i686-pc-windows-msvc, it results in wrong stack offsets.
- // attrs.pointee_align = Some(layout.align.abi);
+ attrs.pointee_align = Some(layout.align.abi);
let extra_attrs = layout.is_unsized().then_some(ArgAttributes::new());
@@ -513,11 +512,19 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
self.mode = Self::indirect_pass_mode(&self.layout);
}
- pub fn make_indirect_byval(&mut self) {
+ pub fn make_indirect_byval(&mut self, byval_align: Option<Align>) {
self.make_indirect();
match self.mode {
- PassMode::Indirect { attrs: _, extra_attrs: _, ref mut on_stack } => {
+ PassMode::Indirect { ref mut attrs, extra_attrs: _, ref mut on_stack } => {
*on_stack = true;
+
+ // Some platforms, like 32-bit x86, change the alignment of the type when passing
+ // `byval`. Account for that.
+ if let Some(byval_align) = byval_align {
+ // On all targets that currently use a byval alignment this holds, so assert it.
+ debug_assert!(byval_align >= Align::from_bytes(4).unwrap());
+ attrs.pointee_align = Some(byval_align);
+ }
}
_ => unreachable!(),
}
@@ -597,6 +604,25 @@ pub enum Conv {
AmdGpuKernel,
AvrInterrupt,
AvrNonBlockingInterrupt,
+
+ RiscvInterrupt {
+ kind: RiscvInterruptKind,
+ },
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum RiscvInterruptKind {
+ Machine,
+ Supervisor,
+}
+
+impl RiscvInterruptKind {
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ Self::Machine => "machine",
+ Self::Supervisor => "supervisor",
+ }
+ }
}
/// Metadata describing how the arguments to a native function
@@ -644,7 +670,8 @@ impl<'a, Ty> FnAbi<'a, Ty> {
{
if abi == spec::abi::Abi::X86Interrupt {
if let Some(arg) = self.args.first_mut() {
- arg.make_indirect_byval();
+ // FIXME(pcwalton): This probably should use the x86 `byval` ABI...
+ arg.make_indirect_byval(None);
}
return Ok(());
}
@@ -672,20 +699,23 @@ impl<'a, Ty> FnAbi<'a, Ty> {
}
},
"aarch64" => {
- let param_policy = if cx.target_spec().is_like_osx {
- aarch64::ParamExtension::ExtendTo32Bits
+ let kind = if cx.target_spec().is_like_osx {
+ aarch64::AbiKind::DarwinPCS
+ } else if cx.target_spec().is_like_windows {
+ aarch64::AbiKind::Win64
} else {
- aarch64::ParamExtension::NoExtension
+ aarch64::AbiKind::AAPCS
};
- aarch64::compute_abi_info(cx, self, param_policy)
+ aarch64::compute_abi_info(cx, self, kind)
}
"amdgpu" => amdgpu::compute_abi_info(cx, self),
"arm" => arm::compute_abi_info(cx, self),
"avr" => avr::compute_abi_info(self),
"loongarch64" => loongarch::compute_abi_info(cx, self),
"m68k" => m68k::compute_abi_info(self),
- "mips" => mips::compute_abi_info(cx, self),
- "mips64" => mips64::compute_abi_info(cx, self),
+ "csky" => csky::compute_abi_info(self),
+ "mips" | "mips32r6" => mips::compute_abi_info(cx, self),
+ "mips64" | "mips64r6" => mips64::compute_abi_info(cx, self),
"powerpc" => powerpc::compute_abi_info(self),
"powerpc64" => powerpc64::compute_abi_info(cx, self),
"s390x" => s390x::compute_abi_info(cx, self),
@@ -744,6 +774,12 @@ impl FromStr for Conv {
"AmdGpuKernel" => Ok(Conv::AmdGpuKernel),
"AvrInterrupt" => Ok(Conv::AvrInterrupt),
"AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt),
+ "RiscvInterrupt(machine)" => {
+ Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine })
+ }
+ "RiscvInterrupt(supervisor)" => {
+ Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor })
+ }
_ => Err(format!("'{s}' is not a valid value for entry function call convention.")),
}
}
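
A standalone sketch (not part of the patch) of the new string forms accepted for the RISC-V interrupt calling conventions, using a simplified stand-in enum rather than the real `Conv`.

    use std::str::FromStr;

    #[derive(Debug, PartialEq)]
    enum RiscvInterruptKind {
        Machine,
        Supervisor,
    }

    #[derive(Debug, PartialEq)]
    enum Conv {
        C,
        RiscvInterrupt { kind: RiscvInterruptKind },
    }

    impl FromStr for Conv {
        type Err = String;
        fn from_str(s: &str) -> Result<Self, Self::Err> {
            match s {
                "C" => Ok(Conv::C),
                "RiscvInterrupt(machine)" => {
                    Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine })
                }
                "RiscvInterrupt(supervisor)" => {
                    Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor })
                }
                _ => Err(format!("'{s}' is not a valid value for entry function call convention.")),
            }
        }
    }

    fn main() {
        assert_eq!(
            "RiscvInterrupt(machine)".parse::<Conv>(),
            Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine })
        );
    }
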
diff --git a/compiler/rustc_target/src/abi/call/wasm.rs b/compiler/rustc_target/src/abi/call/wasm.rs
index 44427ee53..0eb2309ec 100644
--- a/compiler/rustc_target/src/abi/call/wasm.rs
+++ b/compiler/rustc_target/src/abi/call/wasm.rs
@@ -36,7 +36,7 @@ where
{
arg.extend_integer_width_to(32);
if arg.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, arg) {
- arg.make_indirect_byval();
+ arg.make_indirect_byval(None);
}
}
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
index 7c26335dc..b738c3133 100644
--- a/compiler/rustc_target/src/abi/call/x86.rs
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -1,5 +1,5 @@
use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
-use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::abi::{Abi, Align, HasDataLayout, TyAbiInterface, TyAndLayout};
use crate::spec::HasTargetSpec;
#[derive(PartialEq)]
@@ -53,8 +53,75 @@ where
if arg.is_ignore() {
continue;
}
- if arg.layout.is_aggregate() {
- arg.make_indirect_byval();
+
+ // FIXME: MSVC 2015+ will pass the first 3 vector arguments in [XYZ]MM0-2
+ // See https://reviews.llvm.org/D72114 for Clang behavior
+
+ let t = cx.target_spec();
+ let align_4 = Align::from_bytes(4).unwrap();
+ let align_16 = Align::from_bytes(16).unwrap();
+
+ if t.is_like_msvc
+ && arg.layout.is_adt()
+ && let Some(max_repr_align) = arg.layout.max_repr_align
+ && max_repr_align > align_4
+ {
+ // MSVC has special rules for overaligned arguments: https://reviews.llvm.org/D72114.
+ // Summarized here:
+ // - Arguments with _requested_ alignment > 4 are passed indirectly.
+ // - For backwards compatibility, arguments with natural alignment > 4 are still passed
+ // on stack (via `byval`). For example, this includes `double`, `int64_t`,
+ // and structs containing them, provided they lack an explicit alignment attribute.
+ assert!(arg.layout.align.abi >= max_repr_align,
+ "abi alignment {:?} less than requested alignment {max_repr_align:?}",
+ arg.layout.align.abi,
+ );
+ arg.make_indirect();
+ } else if arg.layout.is_aggregate() {
+ // We need to compute the alignment of the `byval` argument. The rules can be found in
+ // `X86_32ABIInfo::getTypeStackAlignInBytes` in Clang's `TargetInfo.cpp`. Summarized
+ // here, they are:
+ //
+ // 1. If the natural alignment of the type is <= 4, the alignment is 4.
+ //
+ // 2. Otherwise, on Linux, the alignment of any vector type is the natural alignment.
+ // This doesn't matter here because we only pass aggregates via `byval`, not vectors.
+ //
+ // 3. Otherwise, on Apple platforms, the alignment of anything that contains a vector
+ // type is 16.
+ //
+ // 4. If none of these conditions are true, the alignment is 4.
+
+ fn contains_vector<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ {
+ match layout.abi {
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) => false,
+ Abi::Vector { .. } => true,
+ Abi::Aggregate { .. } => {
+ for i in 0..layout.fields.count() {
+ if contains_vector(cx, layout.field(cx, i)) {
+ return true;
+ }
+ }
+ false
+ }
+ }
+ }
+
+ let byval_align = if arg.layout.align.abi < align_4 {
+ // (1.)
+ align_4
+ } else if t.is_like_osx && contains_vector(cx, arg.layout) {
+ // (3.)
+ align_16
+ } else {
+ // (4.)
+ align_4
+ };
+
+ arg.make_indirect_byval(Some(byval_align));
} else {
arg.extend_integer_width_to(32);
}
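
A standalone sketch (not part of the patch) of the `byval` alignment rule spelled out in the numbered comments above, as a pure function over the natural alignment in bytes; `is_osx` and `contains_vector` stand for the same conditions used in the hunk.

    /// Compute the `byval` stack alignment (in bytes) for a 32-bit x86 aggregate,
    /// following the rules summarized above (rule 2 is irrelevant for aggregates).
    fn x86_byval_align(natural_align: u64, is_osx: bool, contains_vector: bool) -> u64 {
        if natural_align < 4 {
            4 // (1.) small natural alignment is rounded up to 4
        } else if is_osx && contains_vector {
            16 // (3.) Apple platforms align vector-containing aggregates to 16
        } else {
            4 // (4.) everything else is passed with 4-byte alignment
        }
    }

    fn main() {
        assert_eq!(x86_byval_align(2, false, false), 4);
        assert_eq!(x86_byval_align(16, true, true), 16);
        assert_eq!(x86_byval_align(8, false, false), 4);
    }
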
diff --git a/compiler/rustc_target/src/abi/call/x86_64.rs b/compiler/rustc_target/src/abi/call/x86_64.rs
index b1aefaf05..d1efe9776 100644
--- a/compiler/rustc_target/src/abi/call/x86_64.rs
+++ b/compiler/rustc_target/src/abi/call/x86_64.rs
@@ -213,7 +213,7 @@ where
match cls_or_mem {
Err(Memory) => {
if is_arg {
- arg.make_indirect_byval();
+ arg.make_indirect_byval(None);
} else {
// `sret` parameter thus one less integer register available
arg.make_indirect();
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index 589cd3cf9..dd435dbb0 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -39,7 +39,7 @@ impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
-pub trait TyAbiInterface<'a, C>: Sized {
+pub trait TyAbiInterface<'a, C>: Sized + std::fmt::Debug {
fn ty_and_layout_for_variant(
this: TyAndLayout<'a, Self>,
cx: &C,
@@ -135,29 +135,13 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
for index in indices {
offset += layout.fields.offset(index);
layout = layout.field(cx, index);
+ assert!(
+ layout.is_sized(),
+ "offset of unsized field (type {:?}) cannot be computed statically",
+ layout.ty
+ );
}
offset
}
}
-
-impl<'a, Ty> TyAndLayout<'a, Ty> {
- /// Returns `true` if the layout corresponds to an unsized type.
- pub fn is_unsized(&self) -> bool {
- self.abi.is_unsized()
- }
-
- #[inline]
- pub fn is_sized(&self) -> bool {
- self.abi.is_sized()
- }
-
- /// Returns `true` if the type is a ZST and not unsized.
- pub fn is_zst(&self) -> bool {
- match self.abi {
- Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
- Abi::Uninhabited => self.size.bytes() == 0,
- Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
- }
- }
-}
diff --git a/compiler/rustc_target/src/asm/csky.rs b/compiler/rustc_target/src/asm/csky.rs
new file mode 100644
index 000000000..6f0e7f799
--- /dev/null
+++ b/compiler/rustc_target/src/asm/csky.rs
@@ -0,0 +1,128 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ CSKY CSKYInlineAsmRegClass {
+ reg,
+ freg,
+ }
+}
+
+impl CSKYInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg => types! { _: I8, I16, I32; },
+ Self::freg => types! { _: F32; },
+ }
+ }
+}
+
+// The reserved registers are taken from <https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/CSKY/CSKYRegisterInfo.cpp#79>
+def_regs! {
+ CSKY CSKYInlineAsmReg CSKYInlineAsmRegClass {
+ r0: reg = ["r0","a0"],
+ r1: reg = ["r1","a1"],
+ r2: reg = ["r2","a2"],
+ r3: reg = ["r3","a3"],
+ r4: reg = ["r4","l0"],
+ r5: reg = ["r5","l1"],
+ r6: reg = ["r6","l2"],
+ r9: reg = ["r9","l5"],// feature e2
+ r10: reg = ["r10","l6"],// feature e2
+ r11: reg = ["r11","l7"],// feature e2
+ r12: reg = ["r12","t0"],// feature e2
+ r13: reg = ["r13","t1"],// feature e2
+ r16: reg = ["r16","l8"],// feature high-register
+ r17: reg = ["r17","l9"],// feature high-register
+ r18: reg = ["r18","t2"],// feature high-register
+ r19: reg = ["r19","t3"],// feature high-register
+ r20: reg = ["r20","t4"],// feature high-register
+ r21: reg = ["r21","t5"],// feature high-register
+ r22: reg = ["r22","t6"],// feature high-register
+ r23: reg = ["r23","t7", "fp"],// feature high-register
+ r24: reg = ["r24","t8", "sop"],// feature high-register
+ r25: reg = ["r25","t9","tp", "bsp"],// feature high-register
+ f0: freg = ["fr0","vr0"],
+ f1: freg = ["fr1","vr1"],
+ f2: freg = ["fr2","vr2"],
+ f3: freg = ["fr3","vr3"],
+ f4: freg = ["fr4","vr4"],
+ f5: freg = ["fr5","vr5"],
+ f6: freg = ["fr6","vr6"],
+ f7: freg = ["fr7","vr7"],
+ f8: freg = ["fr8","vr8"],
+ f9: freg = ["fr9","vr9"],
+ f10: freg = ["fr10","vr10"],
+ f11: freg = ["fr11","vr11"],
+ f12: freg = ["fr12","vr12"],
+ f13: freg = ["fr13","vr13"],
+ f14: freg = ["fr14","vr14"],
+ f15: freg = ["fr15","vr15"],
+ f16: freg = ["fr16","vr16"],
+ f17: freg = ["fr17","vr17"],
+ f18: freg = ["fr18","vr18"],
+ f19: freg = ["fr19","vr19"],
+ f20: freg = ["fr20","vr20"],
+ f21: freg = ["fr21","vr21"],
+ f22: freg = ["fr22","vr22"],
+ f23: freg = ["fr23","vr23"],
+ f24: freg = ["fr24","vr24"],
+ f25: freg = ["fr25","vr25"],
+ f26: freg = ["fr26","vr26"],
+ f27: freg = ["fr27","vr27"],
+ f28: freg = ["fr28","vr28"],
+ f29: freg = ["fr29","vr29"],
+ f30: freg = ["fr30","vr30"],
+ f31: freg = ["fr31","vr31"],
+ #error = ["r7", "l3"] =>
+ "the base pointer cannot be used as an operand for inline asm",
+ #error = ["r8","l4"] =>
+ "the frame pointer cannot be used as an operand for inline asm",
+ #error = ["r14","sp"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["r15","lr"] =>
+ "the link register cannot be used as an operand for inline asm",
+ #error = ["r31","tls"] =>
+        "reserved for tls",
+ #error = ["r28", "gb", "rgb", "rdb"] =>
+ "the global pointer cannot be used as an operand for inline asm",
+ #error = ["r26","r27","r29","tb", "rtb", "r30","svbr"] =>
+ "reserved by the ABI",
+ }
+}
+
+impl CSKYInlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ _modifier: Option<char>,
+ ) -> fmt::Result {
+ out.write_str(self.name())
+ }
+}
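
For illustration, a minimal sketch of how the register class added above could be exercised through `asm!` once the backend is wired up (assumes the usual `asm_experimental_arch` gating for new inline-asm architectures; the `addu` mnemonic and the snippet itself are illustrative and not part of this diff):

    // Only meaningful when compiling for a csky target. `reg` is the
    // general-purpose class declared in def_reg_class! above, so the register
    // allocator may pick any of the r*/a*/l*/t* names listed in def_regs!.
    #![feature(asm_experimental_arch)]
    use std::arch::asm;

    fn add(a: u32, b: u32) -> u32 {
        let out: u32;
        // u32 operands are accepted because the class supports I8/I16/I32.
        unsafe { asm!("addu {0}, {1}, {2}", out(reg) out, in(reg) a, in(reg) b) };
        out
    }
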
diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs
index e60b8e78e..a11884bea 100644
--- a/compiler/rustc_target/src/asm/mod.rs
+++ b/compiler/rustc_target/src/asm/mod.rs
@@ -167,6 +167,7 @@ mod aarch64;
mod arm;
mod avr;
mod bpf;
+mod csky;
mod hexagon;
mod loongarch;
mod m68k;
@@ -184,6 +185,7 @@ pub use aarch64::{AArch64InlineAsmReg, AArch64InlineAsmRegClass};
pub use arm::{ArmInlineAsmReg, ArmInlineAsmRegClass};
pub use avr::{AvrInlineAsmReg, AvrInlineAsmRegClass};
pub use bpf::{BpfInlineAsmReg, BpfInlineAsmRegClass};
+pub use csky::{CSKYInlineAsmReg, CSKYInlineAsmRegClass};
pub use hexagon::{HexagonInlineAsmReg, HexagonInlineAsmRegClass};
pub use loongarch::{LoongArchInlineAsmReg, LoongArchInlineAsmRegClass};
pub use m68k::{M68kInlineAsmReg, M68kInlineAsmRegClass};
@@ -220,6 +222,7 @@ pub enum InlineAsmArch {
Avr,
Msp430,
M68k,
+ CSKY,
}
impl FromStr for InlineAsmArch {
@@ -238,8 +241,8 @@ impl FromStr for InlineAsmArch {
"powerpc64" => Ok(Self::PowerPC64),
"hexagon" => Ok(Self::Hexagon),
"loongarch64" => Ok(Self::LoongArch64),
- "mips" => Ok(Self::Mips),
- "mips64" => Ok(Self::Mips64),
+ "mips" | "mips32r6" => Ok(Self::Mips),
+ "mips64" | "mips64r6" => Ok(Self::Mips64),
"s390x" => Ok(Self::S390x),
"spirv" => Ok(Self::SpirV),
"wasm32" => Ok(Self::Wasm32),
@@ -248,6 +251,7 @@ impl FromStr for InlineAsmArch {
"avr" => Ok(Self::Avr),
"msp430" => Ok(Self::Msp430),
"m68k" => Ok(Self::M68k),
+ "csky" => Ok(Self::CSKY),
_ => Err(()),
}
}
@@ -272,6 +276,7 @@ pub enum InlineAsmReg {
Avr(AvrInlineAsmReg),
Msp430(Msp430InlineAsmReg),
M68k(M68kInlineAsmReg),
+ CSKY(CSKYInlineAsmReg),
// Placeholder for invalid register constraints for the current target
Err,
}
@@ -292,6 +297,7 @@ impl InlineAsmReg {
Self::Avr(r) => r.name(),
Self::Msp430(r) => r.name(),
Self::M68k(r) => r.name(),
+ Self::CSKY(r) => r.name(),
Self::Err => "<reg>",
}
}
@@ -311,6 +317,7 @@ impl InlineAsmReg {
Self::Avr(r) => InlineAsmRegClass::Avr(r.reg_class()),
Self::Msp430(r) => InlineAsmRegClass::Msp430(r.reg_class()),
Self::M68k(r) => InlineAsmRegClass::M68k(r.reg_class()),
+ Self::CSKY(r) => InlineAsmRegClass::CSKY(r.reg_class()),
Self::Err => InlineAsmRegClass::Err,
}
}
@@ -344,6 +351,7 @@ impl InlineAsmReg {
InlineAsmArch::Avr => Self::Avr(AvrInlineAsmReg::parse(name)?),
InlineAsmArch::Msp430 => Self::Msp430(Msp430InlineAsmReg::parse(name)?),
InlineAsmArch::M68k => Self::M68k(M68kInlineAsmReg::parse(name)?),
+ InlineAsmArch::CSKY => Self::CSKY(CSKYInlineAsmReg::parse(name)?),
})
}
@@ -371,6 +379,7 @@ impl InlineAsmReg {
Self::Avr(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
Self::Msp430(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
Self::M68k(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::CSKY(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
Self::Err => unreachable!(),
}
}
@@ -397,6 +406,7 @@ impl InlineAsmReg {
Self::Avr(r) => r.emit(out, arch, modifier),
Self::Msp430(r) => r.emit(out, arch, modifier),
Self::M68k(r) => r.emit(out, arch, modifier),
+ Self::CSKY(r) => r.emit(out, arch, modifier),
Self::Err => unreachable!("Use of InlineAsmReg::Err"),
}
}
@@ -416,6 +426,7 @@ impl InlineAsmReg {
Self::Avr(r) => r.overlapping_regs(|r| cb(Self::Avr(r))),
Self::Msp430(_) => cb(self),
Self::M68k(_) => cb(self),
+ Self::CSKY(_) => cb(self),
Self::Err => unreachable!("Use of InlineAsmReg::Err"),
}
}
@@ -440,6 +451,7 @@ pub enum InlineAsmRegClass {
Avr(AvrInlineAsmRegClass),
Msp430(Msp430InlineAsmRegClass),
M68k(M68kInlineAsmRegClass),
+ CSKY(CSKYInlineAsmRegClass),
// Placeholder for invalid register constraints for the current target
Err,
}
@@ -463,6 +475,7 @@ impl InlineAsmRegClass {
Self::Avr(r) => r.name(),
Self::Msp430(r) => r.name(),
Self::M68k(r) => r.name(),
+ Self::CSKY(r) => r.name(),
Self::Err => rustc_span::symbol::sym::reg,
}
}
@@ -488,6 +501,7 @@ impl InlineAsmRegClass {
Self::Avr(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Avr),
Self::Msp430(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Msp430),
Self::M68k(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::M68k),
+ Self::CSKY(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::CSKY),
Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
@@ -520,6 +534,7 @@ impl InlineAsmRegClass {
Self::Avr(r) => r.suggest_modifier(arch, ty),
Self::Msp430(r) => r.suggest_modifier(arch, ty),
Self::M68k(r) => r.suggest_modifier(arch, ty),
+ Self::CSKY(r) => r.suggest_modifier(arch, ty),
Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
@@ -548,6 +563,7 @@ impl InlineAsmRegClass {
Self::Avr(r) => r.default_modifier(arch),
Self::Msp430(r) => r.default_modifier(arch),
Self::M68k(r) => r.default_modifier(arch),
+ Self::CSKY(r) => r.default_modifier(arch),
Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
@@ -575,6 +591,7 @@ impl InlineAsmRegClass {
Self::Avr(r) => r.supported_types(arch),
Self::Msp430(r) => r.supported_types(arch),
Self::M68k(r) => r.supported_types(arch),
+ Self::CSKY(r) => r.supported_types(arch),
Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
@@ -607,6 +624,7 @@ impl InlineAsmRegClass {
InlineAsmArch::Avr => Self::Avr(AvrInlineAsmRegClass::parse(name)?),
InlineAsmArch::Msp430 => Self::Msp430(Msp430InlineAsmRegClass::parse(name)?),
InlineAsmArch::M68k => Self::M68k(M68kInlineAsmRegClass::parse(name)?),
+ InlineAsmArch::CSKY => Self::CSKY(CSKYInlineAsmRegClass::parse(name)?),
})
}
@@ -630,6 +648,7 @@ impl InlineAsmRegClass {
Self::Avr(r) => r.valid_modifiers(arch),
Self::Msp430(r) => r.valid_modifiers(arch),
Self::M68k(r) => r.valid_modifiers(arch),
+ Self::CSKY(r) => r.valid_modifiers(arch),
Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
}
}
@@ -826,6 +845,11 @@ pub fn allocatable_registers(
m68k::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
map
}
+ InlineAsmArch::CSKY => {
+ let mut map = csky::regclass_map();
+ csky::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
}
}
diff --git a/compiler/rustc_target/src/json.rs b/compiler/rustc_target/src/json.rs
index 75bb76a9d..af455b643 100644
--- a/compiler/rustc_target/src/json.rs
+++ b/compiler/rustc_target/src/json.rs
@@ -92,6 +92,7 @@ impl<A: ToJson> ToJson for Option<A> {
impl ToJson for crate::abi::call::Conv {
fn to_json(&self) -> Json {
+ let buf: String;
let s = match self {
Self::C => "C",
Self::Rust => "Rust",
@@ -110,6 +111,10 @@ impl ToJson for crate::abi::call::Conv {
Self::AmdGpuKernel => "AmdGpuKernel",
Self::AvrInterrupt => "AvrInterrupt",
Self::AvrNonBlockingInterrupt => "AvrNonBlockingInterrupt",
+ Self::RiscvInterrupt { kind } => {
+ buf = format!("RiscvInterrupt({})", kind.as_str());
+ &buf
+ }
};
Json::String(s.to_owned())
}
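
A hedged sketch of what the new match arm of `to_json` produces, assuming a `RiscvInterruptKind::Machine` variant whose `as_str()` yields "machine" (the kind strings themselves are not shown in this hunk):

    // Fragment, written as if inside rustc_target where Conv, Json and ToJson
    // are already in scope.
    let conv = Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine };
    assert_eq!(conv.to_json(), Json::String("RiscvInterrupt(machine)".to_owned()));
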
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index a7b54766b..b52002b12 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -12,12 +12,14 @@
#![feature(associated_type_bounds)]
#![feature(exhaustive_patterns)]
#![feature(iter_intersperse)]
+#![feature(let_chains)]
#![feature(min_specialization)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![feature(step_trait)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
use std::path::{Path, PathBuf};
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs b/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs
index 87e8d6270..2414867be 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs
@@ -1,15 +1,15 @@
-use crate::spec::Target;
+use crate::spec::{Target, TargetOptions};
pub fn target() -> Target {
- let mut base = super::hermit_base::opts();
- base.max_atomic_width = Some(128);
- base.features = "+v8a,+strict-align,+neon,+fp-armv8".into();
-
Target {
llvm_target: "aarch64-unknown-hermit".into(),
pointer_width: 64,
- data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
arch: "aarch64".into(),
- options: base,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ options: TargetOptions {
+ features: "+v8a,+strict-align,+neon,+fp-armv8".into(),
+ max_atomic_width: Some(128),
+ ..super::hermit_base::opts()
+ },
}
}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_linux_ohos.rs b/compiler/rustc_target/src/spec/aarch64_unknown_linux_ohos.rs
index bf1b089f6..c8f3db00e 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_linux_ohos.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_linux_ohos.rs
@@ -3,9 +3,7 @@ use crate::spec::{Target, TargetOptions};
use super::SanitizerSet;
pub fn target() -> Target {
- let mut base = super::linux_musl_base::opts();
- base.env = "ohos".into();
- base.crt_static_default = false;
+ let mut base = super::linux_ohos_base::opts();
base.max_atomic_width = Some(128);
Target {
@@ -17,8 +15,6 @@ pub fn target() -> Target {
options: TargetOptions {
features: "+reserve-x18".into(),
mcount: "\u{1}_mcount".into(),
- force_emulated_tls: true,
- has_thread_local: false,
supported_sanitizers: SanitizerSet::ADDRESS
| SanitizerSet::CFI
| SanitizerSet::LEAK
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_teeos.rs b/compiler/rustc_target/src/spec/aarch64_unknown_teeos.rs
new file mode 100644
index 000000000..64a7dc681
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_teeos.rs
@@ -0,0 +1,16 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::teeos_base::opts();
+ base.features = "+strict-align,+neon,+fp-armv8".into();
+ base.max_atomic_width = Some(128);
+ base.linker = Some("aarch64-linux-gnu-ld".into());
+
+ Target {
+ llvm_target: "aarch64-unknown-none".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/abi.rs b/compiler/rustc_target/src/spec/abi.rs
index eb3f66ac3..550cdf6bd 100644
--- a/compiler/rustc_target/src/spec/abi.rs
+++ b/compiler/rustc_target/src/spec/abi.rs
@@ -38,6 +38,8 @@ pub enum Abi {
PlatformIntrinsic,
Unadjusted,
RustCold,
+ RiscvInterruptM,
+ RiscvInterruptS,
}
impl Abi {
@@ -107,11 +109,29 @@ const AbiDatas: &[AbiData] = &[
AbiData { abi: Abi::PlatformIntrinsic, name: "platform-intrinsic" },
AbiData { abi: Abi::Unadjusted, name: "unadjusted" },
AbiData { abi: Abi::RustCold, name: "rust-cold" },
+ AbiData { abi: Abi::RiscvInterruptM, name: "riscv-interrupt-m" },
+ AbiData { abi: Abi::RiscvInterruptS, name: "riscv-interrupt-s" },
];
+#[derive(Copy, Clone, Debug)]
+pub enum AbiUnsupported {
+ Unrecognized,
+ Reason { explain: &'static str },
+}
+
/// Returns the ABI with the given name (if any).
-pub fn lookup(name: &str) -> Option<Abi> {
- AbiDatas.iter().find(|abi_data| name == abi_data.name).map(|&x| x.abi)
+pub fn lookup(name: &str) -> Result<Abi, AbiUnsupported> {
+ AbiDatas.iter().find(|abi_data| name == abi_data.name).map(|&x| x.abi).ok_or_else(|| match name {
+ "riscv-interrupt" => AbiUnsupported::Reason {
+ explain: "please use one of riscv-interrupt-m or riscv-interrupt-s for machine- or supervisor-level interrupts, respectively",
+ },
+ "riscv-interrupt-u" => AbiUnsupported::Reason {
+ explain: "user-mode interrupt handlers have been removed from LLVM pending standardization, see: https://reviews.llvm.org/D149314",
+ },
+
+ _ => AbiUnsupported::Unrecognized,
+
+ })
}
pub fn all_names() -> Vec<&'static str> {
@@ -150,7 +170,8 @@ pub fn is_stable(name: &str) -> Result<(), AbiDisabled> {
// Stable
"Rust" | "C" | "C-unwind" | "cdecl" | "cdecl-unwind" | "stdcall" | "stdcall-unwind"
| "fastcall" | "fastcall-unwind" | "aapcs" | "aapcs-unwind" | "win64" | "win64-unwind"
- | "sysv64" | "sysv64-unwind" | "system" | "system-unwind" | "efiapi" => Ok(()),
+ | "sysv64" | "sysv64-unwind" | "system" | "system-unwind" | "efiapi" | "thiscall"
+ | "thiscall-unwind" => Ok(()),
"rust-intrinsic" => Err(AbiDisabled::Unstable {
feature: sym::intrinsics,
explain: "intrinsics are subject to change",
@@ -167,14 +188,6 @@ pub fn is_stable(name: &str) -> Result<(), AbiDisabled> {
feature: sym::abi_vectorcall,
explain: "vectorcall-unwind ABI is experimental and subject to change",
}),
- "thiscall" => Err(AbiDisabled::Unstable {
- feature: sym::abi_thiscall,
- explain: "thiscall is experimental and subject to change",
- }),
- "thiscall-unwind" => Err(AbiDisabled::Unstable {
- feature: sym::abi_thiscall,
- explain: "thiscall-unwind ABI is experimental and subject to change",
- }),
"rust-call" => Err(AbiDisabled::Unstable {
feature: sym::unboxed_closures,
explain: "rust-call ABI is subject to change",
@@ -207,6 +220,10 @@ pub fn is_stable(name: &str) -> Result<(), AbiDisabled> {
feature: sym::abi_avr_interrupt,
explain: "avr-interrupt and avr-non-blocking-interrupt ABIs are experimental and subject to change",
}),
+ "riscv-interrupt-m" | "riscv-interrupt-s" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_riscv_interrupt,
+ explain: "riscv-interrupt ABIs are experimental and subject to change",
+ }),
"C-cmse-nonsecure-call" => Err(AbiDisabled::Unstable {
feature: sym::abi_c_cmse_nonsecure_call,
explain: "C-cmse-nonsecure-call ABI is experimental and subject to change",
@@ -267,6 +284,8 @@ impl Abi {
PlatformIntrinsic => 32,
Unadjusted => 33,
RustCold => 34,
+ RiscvInterruptM => 35,
+ RiscvInterruptS => 36,
};
debug_assert!(
AbiDatas
diff --git a/compiler/rustc_target/src/spec/abi/tests.rs b/compiler/rustc_target/src/spec/abi/tests.rs
index 8bea5e5ef..251a12fe7 100644
--- a/compiler/rustc_target/src/spec/abi/tests.rs
+++ b/compiler/rustc_target/src/spec/abi/tests.rs
@@ -4,19 +4,19 @@ use super::*;
#[test]
fn lookup_Rust() {
let abi = lookup("Rust");
- assert!(abi.is_some() && abi.unwrap().data().name == "Rust");
+ assert!(abi.is_ok() && abi.unwrap().data().name == "Rust");
}
#[test]
fn lookup_cdecl() {
let abi = lookup("cdecl");
- assert!(abi.is_some() && abi.unwrap().data().name == "cdecl");
+ assert!(abi.is_ok() && abi.unwrap().data().name == "cdecl");
}
#[test]
fn lookup_baz() {
let abi = lookup("baz");
- assert!(abi.is_none());
+ assert!(matches!(abi, Err(AbiUnsupported::Unrecognized)))
}
#[test]
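
A sketch of caller-side handling of the new `Result`-returning `lookup` (illustrative; `Abi::name()` is assumed to be the public accessor for the name field exercised via `data().name` in the tests above):

    fn describe_abi(name: &str) -> String {
        match lookup(name) {
            Ok(abi) => format!("resolved ABI `{}`", abi.name()),
            Err(AbiUnsupported::Reason { explain }) => format!("unsupported ABI: {explain}"),
            Err(AbiUnsupported::Unrecognized) => format!("unrecognized ABI `{name}`"),
        }
    }
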
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs
index c0f1827ad..400030ca0 100644
--- a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs
@@ -11,6 +11,7 @@ pub fn target() -> Target {
features: "+strict-align,+v6".into(),
max_atomic_width: Some(64),
mcount: "\u{1}__gnu_mcount_nc".into(),
+ llvm_mcount_intrinsic: Some("llvm.arm.gnu.eabi.mcount".into()),
..super::linux_gnu_base::opts()
},
}
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs
index 79b8958c2..6228fb15a 100644
--- a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs
@@ -11,6 +11,7 @@ pub fn target() -> Target {
features: "+strict-align,+v6,+vfp2,-d32".into(),
max_atomic_width: Some(64),
mcount: "\u{1}__gnu_mcount_nc".into(),
+ llvm_mcount_intrinsic: Some("llvm.arm.gnu.eabi.mcount".into()),
..super::linux_gnu_base::opts()
},
}
diff --git a/compiler/rustc_target/src/spec/armeb_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armeb_unknown_linux_gnueabi.rs
index 4836f3cf7..1d66515a7 100644
--- a/compiler/rustc_target/src/spec/armeb_unknown_linux_gnueabi.rs
+++ b/compiler/rustc_target/src/spec/armeb_unknown_linux_gnueabi.rs
@@ -13,6 +13,7 @@ pub fn target() -> Target {
endian: Endian::Big,
max_atomic_width: Some(64),
mcount: "\u{1}__gnu_mcount_nc".into(),
+ llvm_mcount_intrinsic: Some("llvm.arm.gnu.eabi.mcount".into()),
..super::linux_gnu_base::opts()
},
}
diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
index 5632bcfce..446efa90d 100644
--- a/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
@@ -5,7 +5,7 @@ use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, Targ
pub fn target() -> Target {
Target {
- llvm_target: "armebv7r-unknown-none-eabi".into(),
+ llvm_target: "armebv7r-none-eabi".into(),
pointer_width: 32,
data_layout: "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
arch: "arm".into(),
@@ -18,7 +18,7 @@ pub fn target() -> Target {
panic_strategy: PanicStrategy::Abort,
max_atomic_width: Some(64),
emit_debug_gdb_scripts: false,
- // GCC and Clang default to 8 for arm-none here
+ // GCC defaults to 8 for arm-none here.
c_enum_min_bits: Some(8),
..Default::default()
},
diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
index 2815de358..0c9e99ff8 100644
--- a/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
@@ -5,7 +5,7 @@ use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, Targ
pub fn target() -> Target {
Target {
- llvm_target: "armebv7r-unknown-none-eabihf".into(),
+ llvm_target: "armebv7r-none-eabihf".into(),
pointer_width: 32,
data_layout: "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
arch: "arm".into(),
@@ -19,7 +19,7 @@ pub fn target() -> Target {
features: "+vfp3,-d32,-fp16".into(),
max_atomic_width: Some(64),
emit_debug_gdb_scripts: false,
- // GCC and Clang default to 8 for arm-none here
+ // GCC defaults to 8 for arm-none here.
c_enum_min_bits: Some(8),
..Default::default()
},
diff --git a/compiler/rustc_target/src/spec/armv4t_none_eabi.rs b/compiler/rustc_target/src/spec/armv4t_none_eabi.rs
index d0f988b27..44fdd3178 100644
--- a/compiler/rustc_target/src/spec/armv4t_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/armv4t_none_eabi.rs
@@ -39,9 +39,9 @@ pub fn target() -> Target {
has_thumb_interworking: true,
relocation_model: RelocModel::Static,
panic_strategy: PanicStrategy::Abort,
- // from thumb_base, rust-lang/rust#44993.
+ // From thumb_base, rust-lang/rust#44993.
emit_debug_gdb_scripts: false,
- // from thumb_base, apparently gcc/clang give enums a minimum of 8 bits on no-os targets
+ // From thumb_base, GCC gives enums a minimum of 8 bits on no-os targets.
c_enum_min_bits: Some(8),
..Default::default()
},
diff --git a/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs
index 1de63a920..cffebcc95 100644
--- a/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs
+++ b/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs
@@ -12,6 +12,7 @@ pub fn target() -> Target {
// Atomic operations provided by compiler-builtins
max_atomic_width: Some(32),
mcount: "\u{1}__gnu_mcount_nc".into(),
+ llvm_mcount_intrinsic: Some("llvm.arm.gnu.eabi.mcount".into()),
has_thumb_interworking: true,
..super::linux_gnu_base::opts()
},
diff --git a/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs b/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs
index b7cfccc8b..4a8aa3157 100644
--- a/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs
@@ -13,6 +13,7 @@ pub fn target() -> Target {
features: "+v6,+vfp2,-d32".into(),
max_atomic_width: Some(64),
mcount: "\u{1}__gnu_mcount_nc".into(),
+ llvm_mcount_intrinsic: Some("llvm.arm.gnu.eabi.mcount".into()),
..super::freebsd_base::opts()
},
}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs
index 903042d7e..73ae212a7 100644
--- a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs
@@ -14,6 +14,7 @@ pub fn target() -> Target {
features: "+v7,+thumb2,+soft-float,-neon".into(),
max_atomic_width: Some(64),
mcount: "\u{1}__gnu_mcount_nc".into(),
+ llvm_mcount_intrinsic: Some("llvm.arm.gnu.eabi.mcount".into()),
..super::linux_gnu_base::opts()
},
}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_ohos.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_ohos.rs
index 16da24533..e9b0bda68 100644
--- a/compiler/rustc_target/src/spec/armv7_unknown_linux_ohos.rs
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_ohos.rs
@@ -17,12 +17,8 @@ pub fn target() -> Target {
abi: "eabi".into(),
features: "+v7,+thumb2,+soft-float,-neon".into(),
max_atomic_width: Some(64),
- env: "ohos".into(),
- crt_static_default: false,
mcount: "\u{1}mcount".into(),
- force_emulated_tls: true,
- has_thread_local: false,
- ..super::linux_musl_base::opts()
+ ..super::linux_ohos_base::opts()
},
}
}
diff --git a/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
index 8cdf3c36b..c134f3e09 100644
--- a/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
@@ -18,7 +18,7 @@ pub fn target() -> Target {
max_atomic_width: Some(64),
panic_strategy: PanicStrategy::Abort,
emit_debug_gdb_scripts: false,
- // GCC and Clang default to 8 for arm-none here
+ // GCC defaults to 8 for arm-none here.
c_enum_min_bits: Some(8),
..Default::default()
};
diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
index 74905ed5a..68b252798 100644
--- a/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
@@ -4,7 +4,7 @@ use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, Targ
pub fn target() -> Target {
Target {
- llvm_target: "armv7r-unknown-none-eabi".into(),
+ llvm_target: "armv7r-none-eabi".into(),
pointer_width: 32,
data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
arch: "arm".into(),
@@ -17,7 +17,7 @@ pub fn target() -> Target {
panic_strategy: PanicStrategy::Abort,
max_atomic_width: Some(64),
emit_debug_gdb_scripts: false,
- // GCC and Clang default to 8 for arm-none here
+ // GCC defaults to 8 for arm-none here.
c_enum_min_bits: Some(8),
..Default::default()
},
diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
index 516b3f5c1..909765a31 100644
--- a/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
@@ -4,7 +4,7 @@ use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, Targ
pub fn target() -> Target {
Target {
- llvm_target: "armv7r-unknown-none-eabihf".into(),
+ llvm_target: "armv7r-none-eabihf".into(),
pointer_width: 32,
data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
arch: "arm".into(),
@@ -18,7 +18,7 @@ pub fn target() -> Target {
features: "+vfp3,-d32,-fp16".into(),
max_atomic_width: Some(64),
emit_debug_gdb_scripts: false,
- // GCC and Clang default to 8 for arm-none here
+ // GCC defaults to 8 for arm-none here.
c_enum_min_bits: Some(8),
..Default::default()
},
diff --git a/compiler/rustc_target/src/spec/avr_gnu_base.rs b/compiler/rustc_target/src/spec/avr_gnu_base.rs
index fbec44b71..cd324c94b 100644
--- a/compiler/rustc_target/src/spec/avr_gnu_base.rs
+++ b/compiler/rustc_target/src/spec/avr_gnu_base.rs
@@ -23,7 +23,7 @@ pub fn target(target_cpu: &'static str, mmcu: &'static str) -> Target {
LinkerFlavor::Gnu(Cc::Yes, Lld::No),
&["-lgcc"],
),
- max_atomic_width: Some(0),
+ max_atomic_width: Some(16),
atomic_cas: false,
relocation_model: RelocModel::Static,
..TargetOptions::default()
diff --git a/compiler/rustc_target/src/spec/csky_unknown_linux_gnuabiv2.rs b/compiler/rustc_target/src/spec/csky_unknown_linux_gnuabiv2.rs
new file mode 100644
index 000000000..7d03dd26f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/csky_unknown_linux_gnuabiv2.rs
@@ -0,0 +1,20 @@
+use crate::spec::{Cc, LinkerFlavor, Lld, Target, TargetOptions};
+
+// This target is for glibc Linux on CSKY.
+
+pub fn target() -> Target {
+ Target {
+ //https://github.com/llvm/llvm-project/blob/8b76aea8d8b1b71f6220bc2845abc749f18a19b7/clang/lib/Basic/Targets/CSKY.h
+ llvm_target: "csky-unknown-linux-gnuabiv2".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-S32-p:32:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:32-v128:32:32-a:0:32-Fi32-n32".into(),
+ arch: "csky".into(),
+ options: TargetOptions {
+ abi: "abiv2".into(),
+ features: "+2e3,+3e7,+7e10,+cache,+dsp1e2,+dspe60,+e1,+e2,+edsp,+elrw,+hard-tp,+high-registers,+hwdiv,+mp,+mp1e2,+nvic,+trust".into(),
+ late_link_args_static: TargetOptions::link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-l:libatomic.a"]),
+ max_atomic_width: Some(32),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/hermit_base.rs b/compiler/rustc_target/src/spec/hermit_base.rs
index dd9991381..c6e98fc1a 100644
--- a/compiler/rustc_target/src/spec/hermit_base.rs
+++ b/compiler/rustc_target/src/spec/hermit_base.rs
@@ -1,21 +1,15 @@
use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, TargetOptions, TlsModel};
pub fn opts() -> TargetOptions {
- let pre_link_args = TargetOptions::link_args(
- LinkerFlavor::Gnu(Cc::No, Lld::No),
- &["--build-id", "--hash-style=gnu", "--Bstatic"],
- );
-
TargetOptions {
os: "hermit".into(),
- linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
- has_thread_local: true,
- pre_link_args,
- panic_strategy: PanicStrategy::Abort,
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
+ tls_model: TlsModel::InitialExec,
position_independent_executables: true,
static_position_independent_executables: true,
- tls_model: TlsModel::InitialExec,
+ has_thread_local: true,
+ panic_strategy: PanicStrategy::Abort,
..Default::default()
}
}
diff --git a/compiler/rustc_target/src/spec/linux_ohos_base.rs b/compiler/rustc_target/src/spec/linux_ohos_base.rs
new file mode 100644
index 000000000..4ad4c8373
--- /dev/null
+++ b/compiler/rustc_target/src/spec/linux_ohos_base.rs
@@ -0,0 +1,12 @@
+use crate::spec::TargetOptions;
+
+pub fn opts() -> TargetOptions {
+ let mut base = super::linux_base::opts();
+
+ base.env = "ohos".into();
+ base.crt_static_default = false;
+ base.force_emulated_tls = true;
+ base.has_thread_local = false;
+
+ base
+}
diff --git a/compiler/rustc_target/src/spec/loongarch64_unknown_none.rs b/compiler/rustc_target/src/spec/loongarch64_unknown_none.rs
index 209d481d6..dbc96d68e 100644
--- a/compiler/rustc_target/src/spec/loongarch64_unknown_none.rs
+++ b/compiler/rustc_target/src/spec/loongarch64_unknown_none.rs
@@ -10,7 +10,8 @@ pub fn target() -> Target {
options: TargetOptions {
cpu: "generic".into(),
features: "+f,+d".into(),
- linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::No),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
+ linker: Some("rust-lld".into()),
llvm_abiname: "lp64d".into(),
max_atomic_width: Some(64),
relocation_model: RelocModel::Static,
diff --git a/compiler/rustc_target/src/spec/loongarch64_unknown_none_softfloat.rs b/compiler/rustc_target/src/spec/loongarch64_unknown_none_softfloat.rs
index f444a7f24..c4d5c7bc4 100644
--- a/compiler/rustc_target/src/spec/loongarch64_unknown_none_softfloat.rs
+++ b/compiler/rustc_target/src/spec/loongarch64_unknown_none_softfloat.rs
@@ -11,7 +11,8 @@ pub fn target() -> Target {
cpu: "generic".into(),
features: "-f,-d".into(),
abi: "softfloat".into(),
- linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::No),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
+ linker: Some("rust-lld".into()),
llvm_abiname: "lp64s".into(),
max_atomic_width: Some(64),
relocation_model: RelocModel::Static,
diff --git a/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs
index 1e066b271..983a449b0 100644
--- a/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs
@@ -6,7 +6,7 @@ pub fn target() -> Target {
llvm_target: "mipsisa32r6-unknown-linux-gnu".into(),
pointer_width: 32,
data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
- arch: "mips".into(),
+ arch: "mips32r6".into(),
options: TargetOptions {
endian: Endian::Big,
cpu: "mips32r6".into(),
diff --git a/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs
index 4785929c1..ec0facdfb 100644
--- a/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs
@@ -5,7 +5,7 @@ pub fn target() -> Target {
llvm_target: "mipsisa32r6el-unknown-linux-gnu".into(),
pointer_width: 32,
data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
- arch: "mips".into(),
+ arch: "mips32r6".into(),
options: TargetOptions {
cpu: "mips32r6".into(),
diff --git a/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs
index 766ac7680..16dd1c416 100644
--- a/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs
+++ b/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs
@@ -6,7 +6,7 @@ pub fn target() -> Target {
llvm_target: "mipsisa64r6-unknown-linux-gnuabi64".into(),
pointer_width: 64,
data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".into(),
- arch: "mips64".into(),
+ arch: "mips64r6".into(),
options: TargetOptions {
abi: "abi64".into(),
endian: Endian::Big,
diff --git a/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs
index d2b07c654..8d0a6aa8f 100644
--- a/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs
+++ b/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs
@@ -5,7 +5,7 @@ pub fn target() -> Target {
llvm_target: "mipsisa64r6el-unknown-linux-gnuabi64".into(),
pointer_width: 64,
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".into(),
- arch: "mips64".into(),
+ arch: "mips64r6".into(),
options: TargetOptions {
abi: "abi64".into(),
// NOTE(mips64r6) matches C toolchain
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index 2365dfaf1..31b6961bb 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -42,7 +42,7 @@ use crate::spec::crt_objects::{CrtObjects, LinkSelfContainedDefault};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_fs_util::try_canonicalize;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
-use rustc_span::symbol::{sym, Symbol};
+use rustc_span::symbol::{kw, sym, Symbol};
use serde_json::Value;
use std::borrow::Cow;
use std::collections::BTreeMap;
@@ -74,6 +74,7 @@ mod l4re_base;
mod linux_base;
mod linux_gnu_base;
mod linux_musl_base;
+mod linux_ohos_base;
mod linux_uclibc_base;
mod msvc_base;
mod netbsd_base;
@@ -82,8 +83,10 @@ mod openbsd_base;
mod redox_base;
mod solaris_base;
mod solid_base;
+mod teeos_base;
mod thumb_base;
mod uefi_msvc_base;
+mod unikraft_linux_musl_base;
mod vxworks_base;
mod wasm_base;
mod windows_gnu_base;
@@ -335,7 +338,7 @@ impl LinkerFlavor {
|| stem == "clang++"
|| stem.ends_with("-clang++")
{
- (Some(Cc::Yes), None)
+ (Some(Cc::Yes), Some(Lld::No))
} else if stem == "wasm-ld"
|| stem.ends_with("-wasm-ld")
|| stem == "ld.lld"
@@ -652,6 +655,43 @@ pub enum RelocModel {
RopiRwpi,
}
+impl RelocModel {
+ pub fn desc(&self) -> &str {
+ match *self {
+ RelocModel::Static => "static",
+ RelocModel::Pic => "pic",
+ RelocModel::Pie => "pie",
+ RelocModel::DynamicNoPic => "dynamic-no-pic",
+ RelocModel::Ropi => "ropi",
+ RelocModel::Rwpi => "rwpi",
+ RelocModel::RopiRwpi => "ropi-rwpi",
+ }
+ }
+ pub const fn desc_symbol(&self) -> Symbol {
+ match *self {
+ RelocModel::Static => kw::Static,
+ RelocModel::Pic => sym::pic,
+ RelocModel::Pie => sym::pie,
+ RelocModel::DynamicNoPic => sym::dynamic_no_pic,
+ RelocModel::Ropi => sym::ropi,
+ RelocModel::Rwpi => sym::rwpi,
+ RelocModel::RopiRwpi => sym::ropi_rwpi,
+ }
+ }
+
+ pub const fn all() -> [Symbol; 7] {
+ [
+ RelocModel::Static.desc_symbol(),
+ RelocModel::Pic.desc_symbol(),
+ RelocModel::Pie.desc_symbol(),
+ RelocModel::DynamicNoPic.desc_symbol(),
+ RelocModel::Ropi.desc_symbol(),
+ RelocModel::Rwpi.desc_symbol(),
+ RelocModel::RopiRwpi.desc_symbol(),
+ ]
+ }
+}
+
impl FromStr for RelocModel {
type Err = ();
@@ -671,16 +711,7 @@ impl FromStr for RelocModel {
impl ToJson for RelocModel {
fn to_json(&self) -> Json {
- match *self {
- RelocModel::Static => "static",
- RelocModel::Pic => "pic",
- RelocModel::Pie => "pie",
- RelocModel::DynamicNoPic => "dynamic-no-pic",
- RelocModel::Ropi => "ropi",
- RelocModel::Rwpi => "rwpi",
- RelocModel::RopiRwpi => "ropi-rwpi",
- }
- .to_json()
+ self.desc().to_json()
}
}
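
A small sketch of how the new `RelocModel` helpers relate (illustrative only): `desc()` is the stable string form now reused by `to_json`, `desc_symbol()` is the interned equivalent, and `all()` enumerates every model's symbol, e.g. for building diagnostics:

    fn _demo_reloc_model_helpers() {
        assert_eq!(RelocModel::RopiRwpi.desc(), "ropi-rwpi");
        assert!(RelocModel::all().contains(&RelocModel::Pie.desc_symbol()));
    }
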
@@ -1243,6 +1274,7 @@ supported_targets! {
("i586-unknown-linux-gnu", i586_unknown_linux_gnu),
("loongarch64-unknown-linux-gnu", loongarch64_unknown_linux_gnu),
("m68k-unknown-linux-gnu", m68k_unknown_linux_gnu),
+ ("csky-unknown-linux-gnuabiv2", csky_unknown_linux_gnuabiv2),
("mips-unknown-linux-gnu", mips_unknown_linux_gnu),
("mips64-unknown-linux-gnuabi64", mips64_unknown_linux_gnuabi64),
("mips64el-unknown-linux-gnuabi64", mips64el_unknown_linux_gnuabi64),
@@ -1298,6 +1330,7 @@ supported_targets! {
("armv7-linux-androideabi", armv7_linux_androideabi),
("thumbv7neon-linux-androideabi", thumbv7neon_linux_androideabi),
("aarch64-linux-android", aarch64_linux_android),
+ ("riscv64-linux-android", riscv64_linux_android),
("aarch64-unknown-freebsd", aarch64_unknown_freebsd),
("armv6-unknown-freebsd", armv6_unknown_freebsd),
@@ -1401,6 +1434,7 @@ supported_targets! {
("wasm32-unknown-emscripten", wasm32_unknown_emscripten),
("wasm32-unknown-unknown", wasm32_unknown_unknown),
("wasm32-wasi", wasm32_wasi),
+ ("wasm32-wasi-preview1-threads", wasm32_wasi_preview1_threads),
("wasm64-unknown-unknown", wasm64_unknown_unknown),
("thumbv6m-none-eabi", thumbv6m_none_eabi),
@@ -1417,8 +1451,11 @@ supported_targets! {
("msp430-none-elf", msp430_none_elf),
("aarch64-unknown-hermit", aarch64_unknown_hermit),
+ ("riscv64gc-unknown-hermit", riscv64gc_unknown_hermit),
("x86_64-unknown-hermit", x86_64_unknown_hermit),
+ ("x86_64-unikraft-linux-musl", x86_64_unikraft_linux_musl),
+
("riscv32i-unknown-none-elf", riscv32i_unknown_none_elf),
("riscv32im-unknown-none-elf", riscv32im_unknown_none_elf),
("riscv32imc-unknown-none-elf", riscv32imc_unknown_none_elf),
@@ -1433,6 +1470,8 @@ supported_targets! {
("riscv64gc-unknown-linux-gnu", riscv64gc_unknown_linux_gnu),
("riscv64gc-unknown-linux-musl", riscv64gc_unknown_linux_musl),
+ ("sparc-unknown-none-elf", sparc_unknown_none_elf),
+
("loongarch64-unknown-none", loongarch64_unknown_none),
("loongarch64-unknown-none-softfloat", loongarch64_unknown_none_softfloat),
@@ -1485,6 +1524,8 @@ supported_targets! {
("x86_64-unknown-none", x86_64_unknown_none),
+ ("aarch64-unknown-teeos", aarch64_unknown_teeos),
+
("mips64-openwrt-linux-musl", mips64_openwrt_linux_musl),
("aarch64-unknown-nto-qnx710", aarch64_unknown_nto_qnx_710),
@@ -1493,6 +1534,7 @@ supported_targets! {
("aarch64-unknown-linux-ohos", aarch64_unknown_linux_ohos),
("armv7-unknown-linux-ohos", armv7_unknown_linux_ohos),
+ ("x86_64-unknown-linux-ohos", x86_64_unknown_linux_ohos),
}
/// Cow-Vec-Str: Cow<'static, [Cow<'static, str>]>
@@ -1908,6 +1950,9 @@ pub struct TargetOptions {
/// Use platform dependent mcount function
pub mcount: StaticCow<str>,
+ /// Use LLVM intrinsic for mcount function name
+ pub llvm_mcount_intrinsic: Option<StaticCow<str>>,
+
/// LLVM ABI name, corresponds to the '-mabi' parameter available in multilib C compilers
pub llvm_abiname: StaticCow<str>,
@@ -2169,6 +2214,7 @@ impl Default for TargetOptions {
override_export_symbols: None,
merge_functions: MergeFunctions::Aliases,
mcount: "mcount".into(),
+ llvm_mcount_intrinsic: None,
llvm_abiname: "".into(),
relax_elf_relocations: false,
llvm_args: cvs![],
@@ -2257,6 +2303,7 @@ impl Target {
PtxKernel => self.arch == "nvptx64",
Msp430Interrupt => self.arch == "msp430",
AmdGpuKernel => self.arch == "amdgcn",
+ RiscvInterruptM | RiscvInterruptS => ["riscv32", "riscv64"].contains(&&self.arch[..]),
AvrInterrupt | AvrNonBlockingInterrupt => self.arch == "avr",
Wasm => ["wasm32", "wasm64"].contains(&&self.arch[..]),
Thiscall { .. } => self.arch == "x86",
@@ -2688,7 +2735,7 @@ impl Target {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
match lookup_abi(s) {
- Some(abi) => base.$key_name = Some(abi),
+ Ok(abi) => base.$key_name = Some(abi),
_ => return Some(Err(format!("'{}' is not a valid value for abi", s))),
}
Some(Ok(()))
@@ -2825,6 +2872,7 @@ impl Target {
key!(override_export_symbols, opt_list);
key!(merge_functions, MergeFunctions)?;
key!(mcount = "target-mcount");
+ key!(llvm_mcount_intrinsic, optional);
key!(llvm_abiname);
key!(relax_elf_relocations, bool);
key!(llvm_args, list);
@@ -3081,6 +3129,7 @@ impl ToJson for Target {
target_option_val!(override_export_symbols);
target_option_val!(merge_functions);
target_option_val!(mcount, "target-mcount");
+ target_option_val!(llvm_mcount_intrinsic);
target_option_val!(llvm_abiname);
target_option_val!(relax_elf_relocations);
target_option_val!(llvm_args);
diff --git a/compiler/rustc_target/src/spec/powerpc64_ibm_aix.rs b/compiler/rustc_target/src/spec/powerpc64_ibm_aix.rs
index 34934379c..4e105a03e 100644
--- a/compiler/rustc_target/src/spec/powerpc64_ibm_aix.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_ibm_aix.rs
@@ -11,7 +11,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc64-ibm-aix".into(),
pointer_width: 64,
- data_layout: "E-m:a-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ data_layout: "E-m:a-Fi64-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
arch: "powerpc64".into(),
options: base,
}
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
index 08b273207..e8fe55a00 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
@@ -11,7 +11,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc64-unknown-freebsd".into(),
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64".into(),
+ data_layout: "E-m:e-Fn32-i64:64-n32:64".into(),
arch: "powerpc64".into(),
options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
index ce64de861..7a0cc539f 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
@@ -11,7 +11,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc64-unknown-linux-gnu".into(),
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ data_layout: "E-m:e-Fi64-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
arch: "powerpc64".into(),
options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
index 81286a668..f80b22828 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
@@ -11,7 +11,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc64-unknown-linux-musl".into(),
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ data_layout: "E-m:e-Fi64-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
arch: "powerpc64".into(),
options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_openbsd.rs
index 7232dce3e..3643f7b0c 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_openbsd.rs
@@ -11,7 +11,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc64-unknown-openbsd".into(),
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64".into(),
+ data_layout: "E-m:e-Fn32-i64:64-n32:64".into(),
arch: "powerpc64".into(),
options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
index 10da7872c..b0472e64e 100644
--- a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
@@ -11,7 +11,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc64-unknown-linux-gnu".into(),
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ data_layout: "E-m:e-Fi64-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
arch: "powerpc64".into(),
options: TargetOptions { endian: Endian::Big, ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs
index 8c941e106..342b1cf4f 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc64le-unknown-freebsd".into(),
pointer_width: 64,
- data_layout: "e-m:e-i64:64-n32:64".into(),
+ data_layout: "e-m:e-Fn32-i64:64-n32:64".into(),
arch: "powerpc64".into(),
options: TargetOptions { mcount: "_mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
index fd896e086..815e3d278 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc64le-unknown-linux-gnu".into(),
pointer_width: 64,
- data_layout: "e-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ data_layout: "e-m:e-Fn32-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
arch: "powerpc64".into(),
options: TargetOptions { mcount: "_mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
index 3cffcf497..0b9b78bce 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc64le-unknown-linux-musl".into(),
pointer_width: 64,
- data_layout: "e-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ data_layout: "e-m:e-Fn32-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
arch: "powerpc64".into(),
options: TargetOptions { mcount: "_mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs
index 342f321bd..e036f5bdb 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs
@@ -14,7 +14,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc-unknown-freebsd13.0".into(),
pointer_width: 32,
- data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ data_layout: "E-m:e-p:32:32-Fn32-i64:64-n32".into(),
arch: "powerpc".into(),
options: TargetOptions {
endian: Endian::Big,
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
index c8c61dc46..c8d6f8b9c 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc-unknown-linux-gnu".into(),
pointer_width: 32,
- data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ data_layout: "E-m:e-p:32:32-Fn32-i64:64-n32".into(),
arch: "powerpc".into(),
options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
index 5c51ec91f..fdaa9d366 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc-unknown-linux-gnuspe".into(),
pointer_width: 32,
- data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ data_layout: "E-m:e-p:32:32-Fn32-i64:64-n32".into(),
arch: "powerpc".into(),
options: TargetOptions {
abi: "spe".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
index fc7d802cb..7fe708cf5 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc-unknown-linux-musl".into(),
pointer_width: 32,
- data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ data_layout: "E-m:e-p:32:32-Fn32-i64:64-n32".into(),
arch: "powerpc".into(),
options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
index 912149c79..6f8875ba7 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc-unknown-netbsd".into(),
pointer_width: 32,
- data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ data_layout: "E-m:e-p:32:32-Fn32-i64:64-n32".into(),
arch: "powerpc".into(),
options: TargetOptions { endian: Endian::Big, mcount: "__mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
index dec85f996..280d36698 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc-unknown-openbsd".into(),
pointer_width: 32,
- data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ data_layout: "E-m:e-p:32:32-Fn32-i64:64-n32".into(),
arch: "powerpc".into(),
options: base,
}
diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
index a8c1c2a61..6f245e6ab 100644
--- a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc-unknown-linux-gnu".into(),
pointer_width: 32,
- data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ data_layout: "E-m:e-p:32:32-Fn32-i64:64-n32".into(),
arch: "powerpc".into(),
options: TargetOptions { endian: Endian::Big, features: "+secure-plt".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
index abb8d13da..1d5a5e5c6 100644
--- a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
+++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
Target {
llvm_target: "powerpc-unknown-linux-gnuspe".into(),
pointer_width: 32,
- data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ data_layout: "E-m:e-p:32:32-Fn32-i64:64-n32".into(),
arch: "powerpc".into(),
options: TargetOptions {
abi: "spe".into(),
diff --git a/compiler/rustc_target/src/spec/riscv64_linux_android.rs b/compiler/rustc_target/src/spec/riscv64_linux_android.rs
new file mode 100644
index 000000000..af0d68554
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv64_linux_android.rs
@@ -0,0 +1,19 @@
+use crate::spec::{CodeModel, SanitizerSet, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv64-linux-android".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
+ arch: "riscv64".into(),
+ options: TargetOptions {
+ code_model: Some(CodeModel::Medium),
+ cpu: "generic-rv64".into(),
+ features: "+m,+a,+f,+d,+c".into(),
+ llvm_abiname: "lp64d".into(),
+ supported_sanitizers: SanitizerSet::ADDRESS,
+ max_atomic_width: Some(64),
+ ..super::android_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_hermit.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_hermit.rs
new file mode 100644
index 000000000..1f6a34c0c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_hermit.rs
@@ -0,0 +1,20 @@
+use crate::spec::{CodeModel, RelocModel, Target, TargetOptions, TlsModel};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv64-unknown-hermit".into(),
+ pointer_width: 64,
+ arch: "riscv64".into(),
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
+ options: TargetOptions {
+ cpu: "generic-rv64".into(),
+ features: "+m,+a,+f,+d,+c".into(),
+ relocation_model: RelocModel::Pic,
+ code_model: Some(CodeModel::Medium),
+ tls_model: TlsModel::LocalExec,
+ max_atomic_width: Some(64),
+ llvm_abiname: "lp64d".into(),
+ ..super::hermit_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
index 12968abda..b10e6264b 100644
--- a/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
@@ -5,7 +5,7 @@ pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
base.endian = Endian::Big;
base.cpu = "v9".into();
- base.max_atomic_width = Some(64);
+ base.max_atomic_width = Some(32);
base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-mv8plus"]);
Target {
diff --git a/compiler/rustc_target/src/spec/sparc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/sparc_unknown_none_elf.rs
new file mode 100644
index 000000000..7e908a0f3
--- /dev/null
+++ b/compiler/rustc_target/src/spec/sparc_unknown_none_elf.rs
@@ -0,0 +1,27 @@
+use crate::abi::Endian;
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let options = TargetOptions {
+ linker_flavor: LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ linker: Some("sparc-elf-gcc".into()),
+ endian: Endian::Big,
+ cpu: "v7".into(),
+ abi: "elf".into(),
+ max_atomic_width: Some(32),
+ atomic_cas: true,
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ no_default_libraries: false,
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ ..Default::default()
+ };
+ Target {
+ data_layout: "E-m:e-p:32:32-i64:64-f128:64-n32-S64".into(),
+ llvm_target: "sparc-unknown-none-elf".into(),
+ pointer_width: 32,
+ arch: "sparc".into(),
+ options,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/teeos_base.rs b/compiler/rustc_target/src/spec/teeos_base.rs
new file mode 100644
index 000000000..1bc71bab0
--- /dev/null
+++ b/compiler/rustc_target/src/spec/teeos_base.rs
@@ -0,0 +1,29 @@
+use super::{Cc, LinkerFlavor, Lld, PanicStrategy};
+use crate::spec::{RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ let lld_args = &["-zmax-page-size=4096", "-znow", "-ztext", "--execute-only"];
+ let cc_args = &["-Wl,-zmax-page-size=4096", "-Wl,-znow", "-Wl,-ztext", "-mexecute-only"];
+
+ let mut pre_link_args = TargetOptions::link_args(LinkerFlavor::Gnu(Cc::No, Lld::No), lld_args);
+ super::add_link_args(&mut pre_link_args, LinkerFlavor::Gnu(Cc::Yes, Lld::No), cc_args);
+
+ TargetOptions {
+ os: "teeos".into(),
+ vendor: "unknown".into(),
+ dynamic_linking: true,
+ linker_flavor: LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ // rpath hardcodes -Wl, so it can't be used together with ld.lld.
+ // C TAs also don't support rpath, so this is fine.
+ has_rpath: false,
+ // Note: Setting has_thread_local to true causes an error when
+ // loading / dyn-linking the TA
+ has_thread_local: false,
+ position_independent_executables: true,
+ relro_level: RelroLevel::Full,
+ crt_static_respected: true,
+ pre_link_args,
+ panic_strategy: PanicStrategy::Abort,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumb_base.rs b/compiler/rustc_target/src/spec/thumb_base.rs
index 2220b9326..0decfecb4 100644
--- a/compiler/rustc_target/src/spec/thumb_base.rs
+++ b/compiler/rustc_target/src/spec/thumb_base.rs
@@ -52,7 +52,7 @@ pub fn opts() -> TargetOptions {
// breaks debugging. Preserve LR by default to prevent that from happening.
frame_pointer: FramePointer::Always,
// ARM supports multiple ABIs for enums, the linux one matches the default of 32 here
- // but any arm-none or thumb-none target will be defaulted to 8 on GCC and clang
+ // but any arm-none or thumb-none target will be defaulted to 8 on GCC.
c_enum_min_bits: Some(8),
..Default::default()
}
diff --git a/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
index 9c59bb911..88a76f49a 100644
--- a/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
@@ -45,8 +45,6 @@ pub fn target() -> Target {
relocation_model: RelocModel::Static,
// suggested from thumb_base, rust-lang/rust#44993.
emit_debug_gdb_scripts: false,
- // suggested from thumb_base, with no-os gcc/clang use 8-bit enums
- c_enum_min_bits: Some(8),
frame_pointer: FramePointer::MayOmit,
main_needs_argc_argv: false,
diff --git a/compiler/rustc_target/src/spec/unikraft_linux_musl_base.rs b/compiler/rustc_target/src/spec/unikraft_linux_musl_base.rs
new file mode 100644
index 000000000..9ccd0a1e7
--- /dev/null
+++ b/compiler/rustc_target/src/spec/unikraft_linux_musl_base.rs
@@ -0,0 +1,15 @@
+use crate::spec::{cvs, PanicStrategy, RelocModel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "linux".into(),
+ env: "musl".into(),
+ vendor: "unikraft".into(),
+ linker: Some("kraftld".into()),
+ relocation_model: RelocModel::Static,
+ families: cvs!["unix"],
+ has_thread_local: true,
+ panic_strategy: PanicStrategy::Abort,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/wasm32_wasi_preview1_threads.rs b/compiler/rustc_target/src/spec/wasm32_wasi_preview1_threads.rs
new file mode 100644
index 000000000..c567155fe
--- /dev/null
+++ b/compiler/rustc_target/src/spec/wasm32_wasi_preview1_threads.rs
@@ -0,0 +1,134 @@
+//! The `wasm32-wasi-preview1-threads` target is a new and still (as of July 2023)
+//! experimental target. The definition in this file is likely to be tweaked
+//! over time and shouldn't be relied on too much.
+//!
+//! `wasi-threads` is a proposal to define a standardized set of syscalls
+//! that WebAssembly files can interoperate with. This set of syscalls is
+//! intended to empower WebAssembly binaries with native capabilities such as
+//! threads, filesystem access, network access, etc.
+//!
+//! You can see more about the proposal at <https://github.com/WebAssembly/wasi-threads>.
+//!
+//! The Rust target definition here is interesting in a few ways. We want to
+//! serve two use cases with this target:
+//!
+//! * First, we want Rust usage of the target to be as hassle-free as possible,
+//! ideally avoiding the need to configure and install a local wasm32-wasi-preview1-threads
+//! toolchain.
+//!
+//! * Second, one of the primary use cases of LLVM's new wasm backend and the
+//! wasm support in LLD is that any compiled language can interoperate with
+//! any other. To that end, the `wasm32-wasi-preview1-threads` target is the first with a viable C
+//! standard library and sysroot common definition, so we want Rust and C/C++
+//! code to interoperate when compiled to `wasm32-unknown-unknown`.
+//!
+//! You'll note, however, that the two goals above are somewhat at odds with one
+//! another. To attempt to solve both use cases in one go we define a target
+//! that (ab)uses the `crt-static` target feature to indicate which one you're
+//! in.
+//!
+//! ## No interop with C required
+//!
+//! By default the `crt-static` target feature is enabled, and when enabled
+//! this means that the bundled version of `libc.a` found in `liblibc.rlib`
+//! is used. This isn't really intended for interoperation with C, because it
+//! may be the case that Rust's bundled C library is incompatible with a
+//! foreign-compiled C library. In this use case, though, we use `rust-lld` and
+//! some copied crt startup object files to ensure that you can download the
+//! wasi target for Rust and you're off to the races, no further configuration
+//! necessary.
+//!
+//! All in all, by default, no external dependencies are required. You can
+//! compile `wasm32-wasi-preview1-threads` binaries straight out of the box. You can't, however,
+//! reliably interoperate with C code in this mode (yet).
+//!
+//! ## Interop with C required
+//!
+//! For the second goal we repurpose the `target-feature` flag, meaning that
+//! you'll need to do a few things to have C/Rust code interoperate.
+//!
+//! 1. All Rust code needs to be compiled with `-C target-feature=-crt-static`,
+//! indicating that the bundled C standard library in the Rust sysroot will
+//! not be used.
+//!
+//! 2. If you're using rustc to build a linked artifact then you'll need to
+//! specify `-C linker` to a `clang` binary that supports
+//! `wasm32-wasi-preview1-threads` and is configured with the `wasm32-wasi-preview1-threads` sysroot. This
+//! will cause Rust code to be linked against the libc.a that the specified
+//! `clang` provides.
+//!
+//! 3. If you're building a staticlib and integrating Rust code elsewhere, then
+//! compiling with `-C target-feature=-crt-static` is all you need to do.
+//!
+//! You can configure the linker via Cargo using the
+//! `CARGO_TARGET_WASM32_WASI_PREVIEW1_THREADS_LINKER` env var. Be sure to also set
+//! `CC_wasm32-wasi-preview1-threads` if any crates in the dependency graph are using the `cc`
+//! crate.
+//!
+//! ## Remember, this is all in flux
+//!
+//! The wasi target is **very** new in its specification. It's likely going to
+//! be a long effort to get it standardized and stable. We'll be following it as
+//! best we can with this target. Don't start relying on too much here unless
+//! you know what you're getting into!
+
+use super::crt_objects::{self, LinkSelfContainedDefault};
+use super::{wasm_base, Cc, LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut options = wasm_base::options();
+
+ options.os = "wasi".into();
+
+ options.add_pre_link_args(
+ LinkerFlavor::WasmLld(Cc::No),
+ &["--import-memory", "--export-memory", "--shared-memory"],
+ );
+ options.add_pre_link_args(
+ LinkerFlavor::WasmLld(Cc::Yes),
+ &[
+ "--target=wasm32-wasi-threads",
+ "-Wl,--import-memory",
+ "-Wl,--export-memory,",
+ "-Wl,--shared-memory",
+ ],
+ );
+
+ options.pre_link_objects_self_contained = crt_objects::pre_wasi_self_contained();
+ options.post_link_objects_self_contained = crt_objects::post_wasi_self_contained();
+
+ // FIXME: Figure out cases in which WASM needs to link with a native toolchain.
+ options.link_self_contained = LinkSelfContainedDefault::True;
+
+ // Right now this is a bit of a workaround: we say that the target has a
+ // static crt by default and take that as the signal to "use the bundled
+ // crt". If that's turned off then the system's crt will be used instead.
+ // This means that default usage of this target doesn't need an external
+ // compiler, but it is still interoperable with an external compiler if
+ // configured correctly.
+ options.crt_static_default = true;
+ options.crt_static_respected = true;
+
+ // Allow `+crt-static` to create a "cdylib" output which is just a wasm file
+ // without a main function.
+ options.crt_static_allows_dylibs = true;
+
+ // WASI's `sys::args::init` function ignores its arguments; instead,
+ // `args::args()` makes the WASI API calls itself.
+ options.main_needs_argc_argv = false;
+
+ // And, WASI mangles the name of "main" to distinguish between different
+ // signatures.
+ options.entry_name = "__main_void".into();
+
+ options.singlethread = false;
+ options.features = "+atomics,+bulk-memory,+mutable-globals".into();
+
+ Target {
+ llvm_target: "wasm32-wasi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20".into(),
+ arch: "wasm32".into(),
+ options,
+ }
+}
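The module comment above describes how `crt-static` is (ab)used as a signal for which linking mode is in effect. As a minimal, hypothetical sketch (not part of this patch), downstream crate code can observe that signal at compile time through the `crt-static` target feature:

    // Reports which of the two linking modes described above is active.
    // With the default `+crt-static`, the bundled `libc.a` from the Rust
    // sysroot is used; with `-C target-feature=-crt-static`, an external
    // wasi-sdk-provided libc is expected instead.
    fn linking_mode() -> &'static str {
        if cfg!(target_feature = "crt-static") {
            "self-contained (bundled sysroot libc)"
        } else {
            "C interop (external wasi-sdk libc)"
        }
    }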
diff --git a/compiler/rustc_target/src/spec/x86_64_unikraft_linux_musl.rs b/compiler/rustc_target/src/spec/x86_64_unikraft_linux_musl.rs
new file mode 100644
index 000000000..2aa093b13
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unikraft_linux_musl.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "x86_64-unknown-linux-musl".into(),
+ pointer_width: 64,
+ arch: "x86_64".into(),
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ options: TargetOptions {
+ cpu: "x86-64".into(),
+ plt_by_default: false,
+ pre_link_args: TargetOptions::link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]),
+ max_atomic_width: Some(64),
+ stack_probes: StackProbeType::X86,
+ ..super::unikraft_linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
index 74ef2527c..1eb069301 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
@@ -1,19 +1,19 @@
-use crate::spec::{StackProbeType, Target};
+use crate::spec::{StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
- let mut base = super::hermit_base::opts();
- base.cpu = "x86-64".into();
- base.plt_by_default = false;
- base.max_atomic_width = Some(64);
- base.features = "+rdrnd,+rdseed".into();
- base.stack_probes = StackProbeType::X86;
-
Target {
llvm_target: "x86_64-unknown-hermit".into(),
pointer_width: 64,
+ arch: "x86_64".into(),
data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.into(),
- arch: "x86_64".into(),
- options: base,
+ options: TargetOptions {
+ cpu: "x86-64".into(),
+ features: "+rdrnd,+rdseed".into(),
+ plt_by_default: false,
+ max_atomic_width: Some(64),
+ stack_probes: StackProbeType::X86,
+ ..super::hermit_base::opts()
+ },
}
}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_ohos.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_ohos.rs
new file mode 100644
index 000000000..a96be8cd5
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_ohos.rs
@@ -0,0 +1,26 @@
+use crate::spec::{Cc, LinkerFlavor, Lld, SanitizerSet, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::linux_ohos_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
+ base.static_position_independent_executables = true;
+ base.supported_sanitizers = SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
+ | SanitizerSet::LEAK
+ | SanitizerSet::MEMORY
+ | SanitizerSet::THREAD;
+ base.supports_xray = true;
+
+ Target {
+ // LLVM 15 doesn't support OpenHarmony yet, use a linux target instead.
+ llvm_target: "x86_64-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_trait_selection/messages.ftl b/compiler/rustc_trait_selection/messages.ftl
index 217ba71b6..f4c9dfa34 100644
--- a/compiler/rustc_trait_selection/messages.ftl
+++ b/compiler/rustc_trait_selection/messages.ftl
@@ -1,3 +1,22 @@
+trait_selection_adjust_signature_borrow = consider adjusting the signature so it borrows its {$len ->
+ [one] argument
+ *[other] arguments
+ }
+
+trait_selection_adjust_signature_remove_borrow = consider adjusting the signature so it does not borrow its {$len ->
+ [one] argument
+ *[other] arguments
+ }
+
+trait_selection_closure_fn_mut_label = closure is `FnMut` because it mutates the variable `{$place}` here
+
+trait_selection_closure_fn_once_label = closure is `FnOnce` because it moves the variable `{$place}` out of its environment
+
+trait_selection_closure_kind_mismatch = expected a closure that implements the `{$expected}` trait, but this closure only implements `{$found}`
+ .label = this closure implements `{$found}`, not `{$expected}`
+
+trait_selection_closure_kind_requirement = the requirement to implement `{$expected}` derives from here
+
trait_selection_dump_vtable_entries = vtable entries for `{$trait_ref}`: {$entries}
trait_selection_empty_on_clause_in_rustc_on_unimplemented = empty `on`-clause in `#[rustc_on_unimplemented]`
diff --git a/compiler/rustc_trait_selection/src/errors.rs b/compiler/rustc_trait_selection/src/errors.rs
index 54e22cc3d..c1fb287d6 100644
--- a/compiler/rustc_trait_selection/src/errors.rs
+++ b/compiler/rustc_trait_selection/src/errors.rs
@@ -1,7 +1,10 @@
use crate::fluent_generated as fluent;
-use rustc_errors::{ErrorGuaranteed, Handler, IntoDiagnostic};
+use rustc_errors::{
+ AddToDiagnostic, Applicability, Diagnostic, ErrorGuaranteed, Handler, IntoDiagnostic,
+ SubdiagnosticMessage,
+};
use rustc_macros::Diagnostic;
-use rustc_middle::ty::{self, PolyTraitRef, Ty};
+use rustc_middle::ty::{self, ClosureKind, PolyTraitRef, Ty};
use rustc_span::{Span, Symbol};
#[derive(Diagnostic)]
@@ -97,3 +100,68 @@ pub struct InherentProjectionNormalizationOverflow {
pub span: Span,
pub ty: String,
}
+
+pub enum AdjustSignatureBorrow {
+ Borrow { to_borrow: Vec<(Span, String)> },
+ RemoveBorrow { remove_borrow: Vec<(Span, String)> },
+}
+
+impl AddToDiagnostic for AdjustSignatureBorrow {
+ fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage,
+ {
+ match self {
+ AdjustSignatureBorrow::Borrow { to_borrow } => {
+ diag.set_arg("len", to_borrow.len());
+ diag.multipart_suggestion_verbose(
+ fluent::trait_selection_adjust_signature_borrow,
+ to_borrow,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ AdjustSignatureBorrow::RemoveBorrow { remove_borrow } => {
+ diag.set_arg("len", remove_borrow.len());
+ diag.multipart_suggestion_verbose(
+ fluent::trait_selection_adjust_signature_remove_borrow,
+ remove_borrow,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(trait_selection_closure_kind_mismatch, code = "E0525")]
+pub struct ClosureKindMismatch {
+ #[primary_span]
+ #[label]
+ pub closure_span: Span,
+ pub expected: ClosureKind,
+ pub found: ClosureKind,
+ #[label(trait_selection_closure_kind_requirement)]
+ pub cause_span: Span,
+
+ #[subdiagnostic]
+ pub fn_once_label: Option<ClosureFnOnceLabel>,
+
+ #[subdiagnostic]
+ pub fn_mut_label: Option<ClosureFnMutLabel>,
+}
+
+#[derive(Subdiagnostic)]
+#[label(trait_selection_closure_fn_once_label)]
+pub struct ClosureFnOnceLabel {
+ #[primary_span]
+ pub span: Span,
+ pub place: String,
+}
+
+#[derive(Subdiagnostic)]
+#[label(trait_selection_closure_fn_mut_label)]
+pub struct ClosureFnMutLabel {
+ #[primary_span]
+ pub span: Span,
+ pub place: String,
+}
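For context, the `ClosureKindMismatch` diagnostic above carries error code E0525; its labels come from the Fluent messages added in `messages.ftl` earlier in this patch. A small, illustrative program (not part of this patch) that triggers it:

    fn call_twice<F: Fn()>(f: F) {
        f();
        f();
    }

    fn main() {
        let mut count = 0;
        // error[E0525]: expected a closure that implements the `Fn` trait,
        // but this closure only implements `FnMut`; the `FnMut` label points
        // at `count += 1`, the place the closure mutates.
        call_twice(|| count += 1);
    }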
diff --git a/compiler/rustc_trait_selection/src/infer.rs b/compiler/rustc_trait_selection/src/infer.rs
index 6efc1e730..38153cccf 100644
--- a/compiler/rustc_trait_selection/src/infer.rs
+++ b/compiler/rustc_trait_selection/src/infer.rs
@@ -72,7 +72,7 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
cause: traits::ObligationCause::dummy(),
param_env,
recursion_depth: 0,
- predicate: ty::Binder::dummy(trait_ref).without_const().to_predicate(self.tcx),
+ predicate: ty::Binder::dummy(trait_ref).to_predicate(self.tcx),
};
self.evaluate_obligation(&obligation).unwrap_or(traits::EvaluationResult::EvaluatedToErr)
}
diff --git a/compiler/rustc_trait_selection/src/solve/alias_relate.rs b/compiler/rustc_trait_selection/src/solve/alias_relate.rs
index 422a6ee34..6b839d64b 100644
--- a/compiler/rustc_trait_selection/src/solve/alias_relate.rs
+++ b/compiler/rustc_trait_selection/src/solve/alias_relate.rs
@@ -1,3 +1,16 @@
+//! Implements the `AliasRelate` goal, which is used when unifying aliases.
+//! Doing this via a separate goal is called "deferred alias relation" and is
+//! part of our more general approach to "lazy normalization".
+//!
+//! This goal, e.g. `A alias-relate B`, may be satisfied by one of three branches:
+//! * normalizes-to: If `A` is a projection, we can prove the equivalent
+//! projection predicate with B as the right-hand side of the projection.
+//! This goal is computed in both directions, if both are aliases.
+//! * subst-relate: Equate `A` and `B` by their substs, if they're both
+//! aliases with the same def-id.
+//! * bidirectional-normalizes-to: If `A` and `B` are both projections, and both
+//! may apply, then we can compute the "intersection" of both normalizes-to by
+//! performing them together. This is used specifically to resolve ambiguities.
use super::{EvalCtxt, SolverMode};
use rustc_infer::traits::query::NoSolution;
use rustc_middle::traits::solve::{Certainty, Goal, QueryResult};
@@ -65,25 +78,28 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
direction,
Invert::Yes,
));
- // Relate via substs
- let subst_relate_response = self
- .assemble_subst_relate_candidate(param_env, alias_lhs, alias_rhs, direction);
- candidates.extend(subst_relate_response);
+ // Relate via args
+ candidates.extend(
+ self.assemble_subst_relate_candidate(
+ param_env, alias_lhs, alias_rhs, direction,
+ ),
+ );
debug!(?candidates);
if let Some(merged) = self.try_merge_responses(&candidates) {
Ok(merged)
} else {
- // When relating two aliases and we have ambiguity, we prefer
- // relating the generic arguments of the aliases over normalizing
- // them. This is necessary for inference during typeck.
+ // When relating two aliases and we have ambiguity, if both
+ // aliases can be normalized to something, we prefer
+ // "bidirectionally normalizing" both of them within the same
+ // candidate.
+ //
+ // See <https://github.com/rust-lang/trait-system-refactor-initiative/issues/25>.
//
// As this is incomplete, we must not do so during coherence.
match self.solver_mode() {
SolverMode::Normal => {
- if let Ok(subst_relate_response) = subst_relate_response {
- Ok(subst_relate_response)
- } else if let Ok(bidirectional_normalizes_to_response) = self
+ if let Ok(bidirectional_normalizes_to_response) = self
.assemble_bidirectional_normalizes_to_candidate(
param_env, lhs, rhs, direction,
)
@@ -115,6 +131,8 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
})
}
+ // Computes the normalizes-to branch, with side-effects. This must be performed
+ // in a probe in order to not taint the evaluation context.
fn normalizes_to_inner(
&mut self,
param_env: ty::ParamEnv<'tcx>,
@@ -124,9 +142,13 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
invert: Invert,
) -> Result<(), NoSolution> {
let other = match direction {
- // This is purely an optimization.
+ // This is purely an optimization. No need to instantiate a new
+ // infer var and equate the RHS to it.
ty::AliasRelationDirection::Equate => other,
+ // Instantiate an infer var and subtype our RHS to it, so that we
+ // properly represent a subtype relation between the LHS and RHS
+ // of the goal.
ty::AliasRelationDirection::Subtype => {
let fresh = self.next_term_infer_of_kind(other);
let (sub, sup) = match invert {
@@ -140,7 +162,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
self.add_goal(Goal::new(
self.tcx(),
param_env,
- ty::Binder::dummy(ty::ProjectionPredicate { projection_ty: alias, term: other }),
+ ty::ProjectionPredicate { projection_ty: alias, term: other },
));
Ok(())
@@ -153,7 +175,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
alias_rhs: ty::AliasTy<'tcx>,
direction: ty::AliasRelationDirection,
) -> QueryResult<'tcx> {
- self.probe_candidate("substs relate").enter(|ecx| {
+ self.probe_candidate("args relate").enter(|ecx| {
match direction {
ty::AliasRelationDirection::Equate => {
ecx.eq(param_env, alias_lhs, alias_rhs)?;
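To make the module comment above concrete, here is a hypothetical snippet (not part of this patch) in which the new solver ends up with an `AliasRelate` goal. Unifying the alias in the return type with the type of the returned expression is deferred into such a goal and then discharged through the normalizes-to branch:

    trait Id {
        type This;
    }

    impl<T> Id for T {
        type This = T;
    }

    // Checking the body produces a goal roughly of the form
    // `AliasRelate(<u32 as Id>::This, u32)`, which holds because
    // `<u32 as Id>::This` normalizes to `u32`.
    fn same(x: u32) -> <u32 as Id>::This {
        x
    }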
diff --git a/compiler/rustc_trait_selection/src/solve/assembly/mod.rs b/compiler/rustc_trait_selection/src/solve/assembly/mod.rs
index 28138054a..36194f973 100644
--- a/compiler/rustc_trait_selection/src/solve/assembly/mod.rs
+++ b/compiler/rustc_trait_selection/src/solve/assembly/mod.rs
@@ -1,18 +1,18 @@
//! Code shared by trait and projection goals for candidate assembly.
-use super::search_graph::OverflowHandler;
use super::{EvalCtxt, SolverMode};
use crate::traits::coherence;
-use rustc_data_structures::fx::FxIndexSet;
use rustc_hir::def_id::DefId;
use rustc_infer::traits::query::NoSolution;
-use rustc_infer::traits::util::elaborate;
use rustc_infer::traits::Reveal;
use rustc_middle::traits::solve::inspect::CandidateKind;
-use rustc_middle::traits::solve::{CanonicalResponse, Certainty, Goal, MaybeCause, QueryResult};
-use rustc_middle::ty::fast_reject::TreatProjections;
-use rustc_middle::ty::TypeFoldable;
+use rustc_middle::traits::solve::{CanonicalResponse, Certainty, Goal, QueryResult};
+use rustc_middle::traits::BuiltinImplSource;
+use rustc_middle::ty::fast_reject::{SimplifiedType, TreatParams};
use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{fast_reject, TypeFoldable};
+use rustc_middle::ty::{ToPredicate, TypeVisitableExt};
+use rustc_span::ErrorGuaranteed;
use std::fmt::Debug;
pub(super) mod structural_traits;
@@ -87,16 +87,6 @@ pub(super) enum CandidateSource {
AliasBound,
}
-/// Records additional information about what kind of built-in impl this is.
-/// This should only be used by selection.
-#[derive(Debug, Clone, Copy)]
-pub(super) enum BuiltinImplSource {
- TraitUpcasting,
- Object,
- Misc,
- Ambiguity,
-}
-
/// Methods used to assemble candidates for either trait or projection goals.
pub(super) trait GoalKind<'tcx>:
TypeFoldable<TyCtxt<'tcx>> + Copy + Eq + std::fmt::Display
@@ -109,10 +99,10 @@ pub(super) trait GoalKind<'tcx>:
fn trait_def_id(self, tcx: TyCtxt<'tcx>) -> DefId;
- // Try equating an assumption predicate against a goal's predicate. If it
- // holds, then execute the `then` callback, which should do any additional
- // work, then produce a response (typically by executing
- // [`EvalCtxt::evaluate_added_goals_and_make_canonical_response`]).
+ /// Try equating an assumption predicate against a goal's predicate. If it
+ /// holds, then execute the `then` callback, which should do any additional
+ /// work, then produce a response (typically by executing
+ /// [`EvalCtxt::evaluate_added_goals_and_make_canonical_response`]).
fn probe_and_match_goal_against_assumption(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
@@ -120,9 +110,9 @@ pub(super) trait GoalKind<'tcx>:
then: impl FnOnce(&mut EvalCtxt<'_, 'tcx>) -> QueryResult<'tcx>,
) -> QueryResult<'tcx>;
- // Consider a clause, which consists of a "assumption" and some "requirements",
- // to satisfy a goal. If the requirements hold, then attempt to satisfy our
- // goal by equating it with the assumption.
+ /// Consider a clause, which consists of an "assumption" and some "requirements",
+ /// to satisfy a goal. If the requirements hold, then attempt to satisfy our
+ /// goal by equating it with the assumption.
fn consider_implied_clause(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
@@ -149,9 +139,9 @@ pub(super) trait GoalKind<'tcx>:
})
}
- // Consider a clause specifically for a `dyn Trait` self type. This requires
- // additionally checking all of the supertraits and object bounds to hold,
- // since they're not implied by the well-formedness of the object type.
+ /// Consider a clause specifically for a `dyn Trait` self type. This requires
+ /// additionally checking all of the supertraits and object bounds to hold,
+ /// since they're not implied by the well-formedness of the object type.
fn consider_object_bound_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
@@ -160,18 +150,14 @@ pub(super) trait GoalKind<'tcx>:
Self::probe_and_match_goal_against_assumption(ecx, goal, assumption, |ecx| {
let tcx = ecx.tcx();
let ty::Dynamic(bounds, _, _) = *goal.predicate.self_ty().kind() else {
- bug!("expected object type in `consider_object_bound_candidate`");
- };
- ecx.add_goals(
- structural_traits::predicates_for_object_candidate(
- &ecx,
- goal.param_env,
- goal.predicate.trait_ref(tcx),
- bounds,
- )
- .into_iter()
- .map(|pred| goal.with(tcx, pred)),
- );
+ bug!("expected object type in `consider_object_bound_candidate`");
+ };
+ ecx.add_goals(structural_traits::predicates_for_object_candidate(
+ &ecx,
+ goal.param_env,
+ goal.predicate.trait_ref(tcx),
+ bounds,
+ ));
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
})
}
@@ -182,112 +168,137 @@ pub(super) trait GoalKind<'tcx>:
impl_def_id: DefId,
) -> QueryResult<'tcx>;
- // A type implements an `auto trait` if its components do as well. These components
- // are given by built-in rules from [`instantiate_constituent_tys_for_auto_trait`].
+ /// If the predicate contained an error, we want to avoid emitting unnecessary trait
+ /// errors but still want to emit errors for other trait goals. We have some special
+ /// handling for this case.
+ ///
+ /// Trait goals always hold while projection goals never do. This is a bit arbitrary
+ /// but prevents incorrect normalization while hiding any trait errors.
+ fn consider_error_guaranteed_candidate(
+ ecx: &mut EvalCtxt<'_, 'tcx>,
+ guar: ErrorGuaranteed,
+ ) -> QueryResult<'tcx>;
+
+ /// A type implements an `auto trait` if its components do as well.
+ ///
+ /// These components are given by built-in rules from
+ /// [`structural_traits::instantiate_constituent_tys_for_auto_trait`].
fn consider_auto_trait_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // A trait alias holds if the RHS traits and `where` clauses hold.
+ /// A trait alias holds if the RHS traits and `where` clauses hold.
fn consider_trait_alias_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // A type is `Copy` or `Clone` if its components are `Sized`. These components
- // are given by built-in rules from [`instantiate_constituent_tys_for_sized_trait`].
+ /// A type is `Sized` if its components are `Sized`.
+ ///
+ /// These components are given by built-in rules from
+ /// [`structural_traits::instantiate_constituent_tys_for_sized_trait`].
fn consider_builtin_sized_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // A type is `Copy` or `Clone` if its components are `Copy` or `Clone`. These
- // components are given by built-in rules from [`instantiate_constituent_tys_for_copy_clone_trait`].
+ /// A type is `Copy` or `Clone` if its components are `Copy` or `Clone`.
+ ///
+ /// These components are given by built-in rules from
+ /// [`structural_traits::instantiate_constituent_tys_for_copy_clone_trait`].
fn consider_builtin_copy_clone_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // A type is `PointerLike` if we can compute its layout, and that layout
- // matches the layout of `usize`.
+ /// A type is `PointerLike` if we can compute its layout, and that layout
+ /// matches the layout of `usize`.
fn consider_builtin_pointer_like_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // A type is a `FnPtr` if it is of `FnPtr` type.
+ /// A type is a `FnPtr` if it is of `FnPtr` type.
fn consider_builtin_fn_ptr_trait_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // A callable type (a closure, fn def, or fn ptr) is known to implement the `Fn<A>`
- // family of traits where `A` is given by the signature of the type.
+ /// A callable type (a closure, fn def, or fn ptr) is known to implement the `Fn<A>`
+ /// family of traits where `A` is given by the signature of the type.
fn consider_builtin_fn_trait_candidates(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
kind: ty::ClosureKind,
) -> QueryResult<'tcx>;
- // `Tuple` is implemented if the `Self` type is a tuple.
+ /// `Tuple` is implemented if the `Self` type is a tuple.
fn consider_builtin_tuple_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // `Pointee` is always implemented.
- //
- // See the projection implementation for the `Metadata` types for all of
- // the built-in types. For structs, the metadata type is given by the struct
- // tail.
+ /// `Pointee` is always implemented.
+ ///
+ /// See the projection implementation for the `Metadata` types for all of
+ /// the built-in types. For structs, the metadata type is given by the struct
+ /// tail.
fn consider_builtin_pointee_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // A generator (that comes from an `async` desugaring) is known to implement
- // `Future<Output = O>`, where `O` is given by the generator's return type
- // that was computed during type-checking.
+ /// A generator (that comes from an `async` desugaring) is known to implement
+ /// `Future<Output = O>`, where `O` is given by the generator's return type
+ /// that was computed during type-checking.
fn consider_builtin_future_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // A generator (that doesn't come from an `async` desugaring) is known to
- // implement `Generator<R, Yield = Y, Return = O>`, given the resume, yield,
- // and return types of the generator computed during type-checking.
+ /// A generator (that doesn't come from an `async` desugaring) is known to
+ /// implement `Generator<R, Yield = Y, Return = O>`, given the resume, yield,
+ /// and return types of the generator computed during type-checking.
fn consider_builtin_generator_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // The most common forms of unsizing are array to slice, and concrete (Sized)
- // type into a `dyn Trait`. ADTs and Tuples can also have their final field
- // unsized if it's generic.
- fn consider_builtin_unsize_candidate(
+ fn consider_builtin_discriminant_kind_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- // `dyn Trait1` can be unsized to `dyn Trait2` if they are the same trait, or
- // if `Trait2` is a (transitive) supertrait of `Trait2`.
- fn consider_builtin_dyn_upcast_candidates(
+ fn consider_builtin_destruct_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
- ) -> Vec<CanonicalResponse<'tcx>>;
+ ) -> QueryResult<'tcx>;
- fn consider_builtin_discriminant_kind_candidate(
+ fn consider_builtin_transmute_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
- fn consider_builtin_destruct_candidate(
+ /// Consider (possibly several) candidates to upcast or unsize a type to another
+ /// type, excluding the coercion of a sized type into a `dyn Trait`.
+ ///
+ /// We return the `BuiltinImplSource` for each candidate as it is needed
+ /// for unsize coercion in hir typeck and because it is difficult to
+ /// otherwise recompute this for codegen. This is a bit of a mess, but it is
+ /// the easiest way to maintain the existing behavior for now.
+ fn consider_structural_builtin_unsize_candidates(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
- ) -> QueryResult<'tcx>;
+ ) -> Vec<(CanonicalResponse<'tcx>, BuiltinImplSource)>;
- fn consider_builtin_transmute_candidate(
+ /// Consider the `Unsize` candidate corresponding to coercing a sized type
+ /// into a `dyn Trait`.
+ ///
+ /// This is computed separately from the rest of the `Unsize` candidates
+ /// since it is only done once per self type, and not once per
+ /// *normalization step* (in `assemble_candidates_via_self_ty`).
+ fn consider_unsize_to_dyn_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx>;
@@ -299,35 +310,68 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
goal: Goal<'tcx, G>,
) -> Vec<Candidate<'tcx>> {
debug_assert_eq!(goal, self.resolve_vars_if_possible(goal));
+ if let Some(ambig) = self.assemble_self_ty_infer_ambiguity_response(goal) {
+ return ambig;
+ }
+
+ let mut candidates = self.assemble_candidates_via_self_ty(goal, 0);
+
+ self.assemble_unsize_to_dyn_candidate(goal, &mut candidates);
+
+ self.assemble_blanket_impl_candidates(goal, &mut candidates);
+
+ self.assemble_param_env_candidates(goal, &mut candidates);
+
+ self.assemble_coherence_unknowable_candidates(goal, &mut candidates);
- // HACK: `_: Trait` is ambiguous, because it may be satisfied via a builtin rule,
- // object bound, alias bound, etc. We are unable to determine this until we can at
- // least structurally resolve the type one layer.
- if goal.predicate.self_ty().is_ty_var() {
- return vec![Candidate {
- source: CandidateSource::BuiltinImpl(BuiltinImplSource::Ambiguity),
+ candidates
+ }
+
+ /// `?0: Trait` is ambiguous, because it may be satisfied via a builtin rule,
+ /// object bound, alias bound, etc. We are unable to determine this until we can at
+ /// least structurally resolve the type one layer.
+ ///
+ /// It would also require us to consider all impls of the trait, which is both pretty
+ /// bad for perf and would also constrain the self type if there is just a single impl.
+ fn assemble_self_ty_infer_ambiguity_response<G: GoalKind<'tcx>>(
+ &mut self,
+ goal: Goal<'tcx, G>,
+ ) -> Option<Vec<Candidate<'tcx>>> {
+ goal.predicate.self_ty().is_ty_var().then(|| {
+ vec![Candidate {
+ source: CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
result: self
.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
.unwrap(),
- }];
+ }]
+ })
+ }
+
+ /// Assemble candidates which apply to the self type. This only looks at candidates which
+ /// apply to the specific self type and ignores all others.
+ ///
+ /// If the self type is still an unresolved inference variable, only a single
+ /// ambiguity candidate is returned.
+ fn assemble_candidates_via_self_ty<G: GoalKind<'tcx>>(
+ &mut self,
+ goal: Goal<'tcx, G>,
+ num_steps: usize,
+ ) -> Vec<Candidate<'tcx>> {
+ debug_assert_eq!(goal, self.resolve_vars_if_possible(goal));
+ if let Some(ambig) = self.assemble_self_ty_infer_ambiguity_response(goal) {
+ return ambig;
}
let mut candidates = Vec::new();
- self.assemble_candidates_after_normalizing_self_ty(goal, &mut candidates);
-
- self.assemble_impl_candidates(goal, &mut candidates);
+ self.assemble_non_blanket_impl_candidates(goal, &mut candidates);
self.assemble_builtin_impl_candidates(goal, &mut candidates);
- self.assemble_param_env_candidates(goal, &mut candidates);
-
self.assemble_alias_bound_candidates(goal, &mut candidates);
self.assemble_object_bound_candidates(goal, &mut candidates);
- self.assemble_coherence_unknowable_candidates(goal, &mut candidates);
-
+ self.assemble_candidates_after_normalizing_self_ty(goal, &mut candidates, num_steps);
candidates
}
@@ -350,70 +394,179 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
&mut self,
goal: Goal<'tcx, G>,
candidates: &mut Vec<Candidate<'tcx>>,
+ num_steps: usize,
+ ) {
+ let tcx = self.tcx();
+ let &ty::Alias(_, projection_ty) = goal.predicate.self_ty().kind() else { return };
+
+ candidates.extend(self.probe(|_| CandidateKind::NormalizedSelfTyAssembly).enter(|ecx| {
+ if num_steps < ecx.local_overflow_limit() {
+ let normalized_ty = ecx.next_ty_infer();
+ let normalizes_to_goal = goal.with(
+ tcx,
+ ty::ProjectionPredicate { projection_ty, term: normalized_ty.into() },
+ );
+ ecx.add_goal(normalizes_to_goal);
+ if let Err(NoSolution) = ecx.try_evaluate_added_goals() {
+ debug!("self type normalization failed");
+ return vec![];
+ }
+ let normalized_ty = ecx.resolve_vars_if_possible(normalized_ty);
+ debug!(?normalized_ty, "self type normalized");
+ // NOTE: Alternatively we could call `evaluate_goal` here and only
+ // have a `Normalized` candidate. This doesn't work as long as we
+ // use `CandidateSource` in winnowing.
+ let goal = goal.with(tcx, goal.predicate.with_self_ty(tcx, normalized_ty));
+ ecx.assemble_candidates_via_self_ty(goal, num_steps + 1)
+ } else {
+ match ecx.evaluate_added_goals_and_make_canonical_response(Certainty::OVERFLOW) {
+ Ok(result) => vec![Candidate {
+ source: CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
+ result,
+ }],
+ Err(NoSolution) => vec![],
+ }
+ }
+ }));
+ }
+
+ #[instrument(level = "debug", skip_all)]
+ fn assemble_non_blanket_impl_candidates<G: GoalKind<'tcx>>(
+ &mut self,
+ goal: Goal<'tcx, G>,
+ candidates: &mut Vec<Candidate<'tcx>>,
) {
let tcx = self.tcx();
- let &ty::Alias(_, projection_ty) = goal.predicate.self_ty().kind() else {
- return
+ let self_ty = goal.predicate.self_ty();
+ let trait_impls = tcx.trait_impls_of(goal.predicate.trait_def_id(tcx));
+ let mut consider_impls_for_simplified_type = |simp| {
+ if let Some(impls_for_type) = trait_impls.non_blanket_impls().get(&simp) {
+ for &impl_def_id in impls_for_type {
+ match G::consider_impl_candidate(self, goal, impl_def_id) {
+ Ok(result) => candidates
+ .push(Candidate { source: CandidateSource::Impl(impl_def_id), result }),
+ Err(NoSolution) => (),
+ }
+ }
+ }
};
- let normalized_self_candidates: Result<_, NoSolution> =
- self.probe(|_| CandidateKind::NormalizedSelfTyAssembly).enter(|ecx| {
- ecx.with_incremented_depth(
- |ecx| {
- let result = ecx.evaluate_added_goals_and_make_canonical_response(
- Certainty::Maybe(MaybeCause::Overflow),
- )?;
- Ok(vec![Candidate {
- source: CandidateSource::BuiltinImpl(BuiltinImplSource::Ambiguity),
- result,
- }])
- },
- |ecx| {
- let normalized_ty = ecx.next_ty_infer();
- let normalizes_to_goal = goal.with(
- tcx,
- ty::Binder::dummy(ty::ProjectionPredicate {
- projection_ty,
- term: normalized_ty.into(),
- }),
- );
- ecx.add_goal(normalizes_to_goal);
- let _ = ecx.try_evaluate_added_goals().inspect_err(|_| {
- debug!("self type normalization failed");
- })?;
- let normalized_ty = ecx.resolve_vars_if_possible(normalized_ty);
- debug!(?normalized_ty, "self type normalized");
- // NOTE: Alternatively we could call `evaluate_goal` here and only
- // have a `Normalized` candidate. This doesn't work as long as we
- // use `CandidateSource` in winnowing.
- let goal = goal.with(tcx, goal.predicate.with_self_ty(tcx, normalized_ty));
- Ok(ecx.assemble_and_evaluate_candidates(goal))
- },
- )
- });
+ match self_ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(_, _)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(_, _)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(_, _, _)
+ | ty::FnDef(_, _)
+ | ty::FnPtr(_)
+ | ty::Dynamic(_, _, _)
+ | ty::Closure(_, _)
+ | ty::Generator(_, _, _)
+ | ty::Never
+ | ty::Tuple(_) => {
+ let simp =
+ fast_reject::simplify_type(tcx, self_ty, TreatParams::ForLookup).unwrap();
+ consider_impls_for_simplified_type(simp);
+ }
+
+ // HACK: For integer and float variables we have to manually look at all impls
+ // which have some integer or float as a self type.
+ ty::Infer(ty::IntVar(_)) => {
+ use ty::IntTy::*;
+ use ty::UintTy::*;
+ // This causes a compiler error if any new integer kinds are added.
+ let (I8 | I16 | I32 | I64 | I128 | Isize): ty::IntTy;
+ let (U8 | U16 | U32 | U64 | U128 | Usize): ty::UintTy;
+ let possible_integers = [
+ // signed integers
+ SimplifiedType::Int(I8),
+ SimplifiedType::Int(I16),
+ SimplifiedType::Int(I32),
+ SimplifiedType::Int(I64),
+ SimplifiedType::Int(I128),
+ SimplifiedType::Int(Isize),
+ // unsigned integers
+ SimplifiedType::Uint(U8),
+ SimplifiedType::Uint(U16),
+ SimplifiedType::Uint(U32),
+ SimplifiedType::Uint(U64),
+ SimplifiedType::Uint(U128),
+ SimplifiedType::Uint(Usize),
+ ];
+ for simp in possible_integers {
+ consider_impls_for_simplified_type(simp);
+ }
+ }
+
+ ty::Infer(ty::FloatVar(_)) => {
+ // This causes a compiler error if any new float kinds are added.
+ let (ty::FloatTy::F32 | ty::FloatTy::F64);
+ let possible_floats = [
+ SimplifiedType::Float(ty::FloatTy::F32),
+ SimplifiedType::Float(ty::FloatTy::F64),
+ ];
+
+ for simp in possible_floats {
+ consider_impls_for_simplified_type(simp);
+ }
+ }
+
+ // The only traits applying to aliases and placeholders are blanket impls.
+ //
+ // Impls which apply to an alias after normalization are handled by
+ // `assemble_candidates_after_normalizing_self_ty`.
+ ty::Alias(_, _) | ty::Placeholder(..) | ty::Error(_) => (),
+
+ // FIXME: These should ideally not exist as a self type. It would be nice for
+ // the builtin auto trait impls of generators to instead directly recurse
+ // into the witness.
+ ty::GeneratorWitness(_) | ty::GeneratorWitnessMIR(_, _) => (),
+
+ // These variants should not exist as a self type.
+ ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
+ | ty::Param(_)
+ | ty::Bound(_, _) => bug!("unexpected self type: {self_ty}"),
+ }
+ }
- if let Ok(normalized_self_candidates) = normalized_self_candidates {
- candidates.extend(normalized_self_candidates);
+ fn assemble_unsize_to_dyn_candidate<G: GoalKind<'tcx>>(
+ &mut self,
+ goal: Goal<'tcx, G>,
+ candidates: &mut Vec<Candidate<'tcx>>,
+ ) {
+ let tcx = self.tcx();
+ if tcx.lang_items().unsize_trait() == Some(goal.predicate.trait_def_id(tcx)) {
+ match G::consider_unsize_to_dyn_candidate(self, goal) {
+ Ok(result) => candidates.push(Candidate {
+ source: CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
+ result,
+ }),
+ Err(NoSolution) => (),
+ }
}
}
- #[instrument(level = "debug", skip_all)]
- fn assemble_impl_candidates<G: GoalKind<'tcx>>(
+ fn assemble_blanket_impl_candidates<G: GoalKind<'tcx>>(
&mut self,
goal: Goal<'tcx, G>,
candidates: &mut Vec<Candidate<'tcx>>,
) {
let tcx = self.tcx();
- tcx.for_each_relevant_impl_treating_projections(
- goal.predicate.trait_def_id(tcx),
- goal.predicate.self_ty(),
- TreatProjections::NextSolverLookup,
- |impl_def_id| match G::consider_impl_candidate(self, goal, impl_def_id) {
+ let trait_impls = tcx.trait_impls_of(goal.predicate.trait_def_id(tcx));
+ for &impl_def_id in trait_impls.blanket_impls() {
+ match G::consider_impl_candidate(self, goal, impl_def_id) {
Ok(result) => candidates
.push(Candidate { source: CandidateSource::Impl(impl_def_id), result }),
Err(NoSolution) => (),
- },
- );
+ }
+ }
}
#[instrument(level = "debug", skip_all)]
@@ -422,8 +575,9 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
goal: Goal<'tcx, G>,
candidates: &mut Vec<Candidate<'tcx>>,
) {
- let lang_items = self.tcx().lang_items();
- let trait_def_id = goal.predicate.trait_def_id(self.tcx());
+ let tcx = self.tcx();
+ let lang_items = tcx.lang_items();
+ let trait_def_id = goal.predicate.trait_def_id(tcx);
// N.B. When assembling built-in candidates for lang items that are also
// `auto` traits, then the auto trait candidate that is assembled in
@@ -432,9 +586,11 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
// Instead of adding the logic here, it's a better idea to add it in
// `EvalCtxt::disqualify_auto_trait_candidate_due_to_possible_impl` in
// `solve::trait_goals` instead.
- let result = if self.tcx().trait_is_auto(trait_def_id) {
+ let result = if let Err(guar) = goal.predicate.error_reported() {
+ G::consider_error_guaranteed_candidate(self, guar)
+ } else if tcx.trait_is_auto(trait_def_id) {
G::consider_auto_trait_candidate(self, goal)
- } else if self.tcx().trait_is_alias(trait_def_id) {
+ } else if tcx.trait_is_alias(trait_def_id) {
G::consider_trait_alias_candidate(self, goal)
} else if lang_items.sized_trait() == Some(trait_def_id) {
G::consider_builtin_sized_candidate(self, goal)
@@ -456,8 +612,6 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
G::consider_builtin_future_candidate(self, goal)
} else if lang_items.gen_trait() == Some(trait_def_id) {
G::consider_builtin_generator_candidate(self, goal)
- } else if lang_items.unsize_trait() == Some(trait_def_id) {
- G::consider_builtin_unsize_candidate(self, goal)
} else if lang_items.discriminant_kind_trait() == Some(trait_def_id) {
G::consider_builtin_discriminant_kind_candidate(self, goal)
} else if lang_items.destruct_trait() == Some(trait_def_id) {
@@ -479,11 +633,8 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
// There may be multiple unsize candidates for a trait with several supertraits:
// `trait Foo: Bar<A> + Bar<B>` and `dyn Foo: Unsize<dyn Bar<_>>`
if lang_items.unsize_trait() == Some(trait_def_id) {
- for result in G::consider_builtin_dyn_upcast_candidates(self, goal) {
- candidates.push(Candidate {
- source: CandidateSource::BuiltinImpl(BuiltinImplSource::TraitUpcasting),
- result,
- });
+ for (result, source) in G::consider_structural_builtin_unsize_candidates(self, goal) {
+ candidates.push(Candidate { source: CandidateSource::BuiltinImpl(source), result });
}
}
}
@@ -544,7 +695,8 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
ty::Alias(ty::Projection | ty::Opaque, alias_ty) => alias_ty,
};
- for assumption in self.tcx().item_bounds(alias_ty.def_id).subst(self.tcx(), alias_ty.substs)
+ for assumption in
+ self.tcx().item_bounds(alias_ty.def_id).instantiate(self.tcx(), alias_ty.args)
{
match G::consider_alias_bound_candidate(self, goal, assumption) {
Ok(result) => {
@@ -584,7 +736,6 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
self.tcx(),
ty::TraitPredicate {
trait_ref: self_trait_ref,
- constness: ty::BoundConstness::NotConst,
polarity: ty::ImplPolarity::Positive,
},
);
@@ -698,30 +849,53 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
ty::Dynamic(bounds, ..) => bounds,
};
- let own_bounds: FxIndexSet<_> =
- bounds.iter().map(|bound| bound.with_self_ty(tcx, self_ty)).collect();
- for assumption in elaborate(tcx, own_bounds.iter().copied())
- // we only care about bounds that match the `Self` type
- .filter_only_self()
- {
- // FIXME: Predicates are fully elaborated in the object type's existential bounds
- // list. We want to only consider these pre-elaborated projections, and not other
- // projection predicates that we reach by elaborating the principal trait ref,
- // since that'll cause ambiguity.
- //
- // We can remove this when we have implemented lifetime intersections in responses.
- if assumption.as_projection_clause().is_some() && !own_bounds.contains(&assumption) {
- continue;
- }
+ // Do not consider built-in object impls for non-object-safe types.
+ if bounds.principal_def_id().is_some_and(|def_id| !tcx.check_is_object_safe(def_id)) {
+ return;
+ }
- match G::consider_object_bound_candidate(self, goal, assumption) {
- Ok(result) => candidates.push(Candidate {
- source: CandidateSource::BuiltinImpl(BuiltinImplSource::Object),
- result,
- }),
- Err(NoSolution) => (),
+ // Consider all of the auto-trait and projection bounds, which don't
+ // need to be recorded as a `BuiltinImplSource::Object` since they don't
+ // really have a vtable base...
+ for bound in bounds {
+ match bound.skip_binder() {
+ ty::ExistentialPredicate::Trait(_) => {
+ // Skip principal
+ }
+ ty::ExistentialPredicate::Projection(_)
+ | ty::ExistentialPredicate::AutoTrait(_) => {
+ match G::consider_object_bound_candidate(
+ self,
+ goal,
+ bound.with_self_ty(tcx, self_ty),
+ ) {
+ Ok(result) => candidates.push(Candidate {
+ source: CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
+ result,
+ }),
+ Err(NoSolution) => (),
+ }
+ }
}
}
+
+ // FIXME: We only need to do *any* of this if we're considering a trait goal,
+ // since we don't need to look at any supertrait or anything if we are doing
+ // a projection goal.
+ if let Some(principal) = bounds.principal() {
+ let principal_trait_ref = principal.with_self_ty(tcx, self_ty);
+ self.walk_vtable(principal_trait_ref, |ecx, assumption, vtable_base, _| {
+ match G::consider_object_bound_candidate(ecx, goal, assumption.to_predicate(tcx)) {
+ Ok(result) => candidates.push(Candidate {
+ source: CandidateSource::BuiltinImpl(BuiltinImplSource::Object {
+ vtable_base,
+ }),
+ result,
+ }),
+ Err(NoSolution) => (),
+ }
+ });
+ }
}
#[instrument(level = "debug", skip_all)]
@@ -730,26 +904,43 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
goal: Goal<'tcx, G>,
candidates: &mut Vec<Candidate<'tcx>>,
) {
+ let tcx = self.tcx();
match self.solver_mode() {
SolverMode::Normal => return,
- SolverMode::Coherence => {
- let trait_ref = goal.predicate.trait_ref(self.tcx());
- match coherence::trait_ref_is_knowable(self.tcx(), trait_ref) {
- Ok(()) => {}
- Err(_) => match self
- .evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
- {
- Ok(result) => candidates.push(Candidate {
- source: CandidateSource::BuiltinImpl(BuiltinImplSource::Ambiguity),
- result,
- }),
- // FIXME: This will be reachable at some point if we're in
- // `assemble_candidates_after_normalizing_self_ty` and we get a
- // universe error. We'll deal with it at this point.
- Err(NoSolution) => bug!("coherence candidate resulted in NoSolution"),
- },
+ SolverMode::Coherence => {}
+ };
+
+ let result = self.probe_candidate("coherence unknowable").enter(|ecx| {
+ let trait_ref = goal.predicate.trait_ref(tcx);
+
+ #[derive(Debug)]
+ enum FailureKind {
+ Overflow,
+ NoSolution(NoSolution),
+ }
+ let lazily_normalize_ty = |ty| match ecx.try_normalize_ty(goal.param_env, ty) {
+ Ok(Some(ty)) => Ok(ty),
+ Ok(None) => Err(FailureKind::Overflow),
+ Err(e) => Err(FailureKind::NoSolution(e)),
+ };
+
+ match coherence::trait_ref_is_knowable(tcx, trait_ref, lazily_normalize_ty) {
+ Err(FailureKind::Overflow) => {
+ ecx.evaluate_added_goals_and_make_canonical_response(Certainty::OVERFLOW)
+ }
+ Err(FailureKind::NoSolution(NoSolution)) | Ok(Ok(())) => Err(NoSolution),
+ Ok(Err(_)) => {
+ ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
}
}
+ });
+
+ match result {
+ Ok(result) => candidates.push(Candidate {
+ source: CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
+ result,
+ }),
+ Err(NoSolution) => {}
}
}
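As an illustration of the `?0: Trait` ambiguity documented above for `assemble_self_ty_infer_ambiguity_response`, consider this hypothetical snippet (not part of this patch). While the self type is an unresolved inference variable the trait goal stays ambiguous, and it is only retried once inference pins the type down:

    trait Parse: Sized {
        fn parse(s: &str) -> Option<Self>;
    }

    impl Parse for u32 {
        fn parse(s: &str) -> Option<Self> {
            s.parse().ok()
        }
    }

    fn demo() -> Option<u32> {
        // Here the goal starts out as `?0: Parse`; the solver returns a single
        // ambiguous candidate rather than enumerating impls, and the goal is
        // resolved once the return type forces `?0 = u32`.
        Parse::parse("42")
    }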
diff --git a/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs b/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs
index 3bb8cad15..c47767101 100644
--- a/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs
+++ b/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs
@@ -1,6 +1,9 @@
+//! Code which is used by built-in goals that match "structurally", such as auto
+//! traits and `Copy`/`Clone`.
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::{def_id::DefId, Movability, Mutability};
use rustc_infer::traits::query::NoSolution;
+use rustc_middle::traits::solve::Goal;
use rustc_middle::ty::{
self, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt,
};
@@ -51,36 +54,36 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_auto_trait<'tcx>(
Ok(tys.iter().collect())
}
- ty::Closure(_, ref substs) => Ok(vec![substs.as_closure().tupled_upvars_ty()]),
+ ty::Closure(_, ref args) => Ok(vec![args.as_closure().tupled_upvars_ty()]),
- ty::Generator(_, ref substs, _) => {
- let generator_substs = substs.as_generator();
- Ok(vec![generator_substs.tupled_upvars_ty(), generator_substs.witness()])
+ ty::Generator(_, ref args, _) => {
+ let generator_args = args.as_generator();
+ Ok(vec![generator_args.tupled_upvars_ty(), generator_args.witness()])
}
ty::GeneratorWitness(types) => Ok(ecx.instantiate_binder_with_placeholders(types).to_vec()),
- ty::GeneratorWitnessMIR(def_id, substs) => Ok(ecx
+ ty::GeneratorWitnessMIR(def_id, args) => Ok(ecx
.tcx()
.generator_hidden_types(def_id)
.map(|bty| {
ecx.instantiate_binder_with_placeholders(replace_erased_lifetimes_with_bound_vars(
tcx,
- bty.subst(tcx, substs),
+ bty.instantiate(tcx, args),
))
})
.collect()),
// For `PhantomData<T>`, we pass `T`.
- ty::Adt(def, substs) if def.is_phantom_data() => Ok(vec![substs.type_at(0)]),
+ ty::Adt(def, args) if def.is_phantom_data() => Ok(vec![args.type_at(0)]),
- ty::Adt(def, substs) => Ok(def.all_fields().map(|f| f.ty(tcx, substs)).collect()),
+ ty::Adt(def, args) => Ok(def.all_fields().map(|f| f.ty(tcx, args)).collect()),
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
- Ok(vec![tcx.type_of(def_id).subst(tcx, substs)])
+ Ok(vec![tcx.type_of(def_id).instantiate(tcx, args)])
}
}
}
@@ -146,9 +149,9 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_sized_trait<'tcx>(
ty::Tuple(tys) => Ok(tys.to_vec()),
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
let sized_crit = def.sized_constraint(ecx.tcx());
- Ok(sized_crit.subst_iter_copied(ecx.tcx(), substs).collect())
+ Ok(sized_crit.iter_instantiated(ecx.tcx(), args).collect())
}
}
}
@@ -158,14 +161,12 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<'tcx>(
ty: Ty<'tcx>,
) -> Result<Vec<Ty<'tcx>>, NoSolution> {
match *ty.kind() {
- ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
- | ty::FnDef(..)
- | ty::FnPtr(_)
- | ty::Error(_) => Ok(vec![]),
+ ty::FnDef(..) | ty::FnPtr(_) | ty::Error(_) => Ok(vec![]),
// Implementations are provided in core
ty::Uint(_)
| ty::Int(_)
+ | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Bool
| ty::Float(_)
| ty::Char
@@ -192,11 +193,11 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<'tcx>(
ty::Tuple(tys) => Ok(tys.to_vec()),
- ty::Closure(_, substs) => Ok(vec![substs.as_closure().tupled_upvars_ty()]),
+ ty::Closure(_, args) => Ok(vec![args.as_closure().tupled_upvars_ty()]),
- ty::Generator(_, substs, Movability::Movable) => {
+ ty::Generator(_, args, Movability::Movable) => {
if ecx.tcx().features().generator_clone {
- let generator = substs.as_generator();
+ let generator = args.as_generator();
Ok(vec![generator.tupled_upvars_ty(), generator.witness()])
} else {
Err(NoSolution)
@@ -205,13 +206,13 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<'tcx>(
ty::GeneratorWitness(types) => Ok(ecx.instantiate_binder_with_placeholders(types).to_vec()),
- ty::GeneratorWitnessMIR(def_id, substs) => Ok(ecx
+ ty::GeneratorWitnessMIR(def_id, args) => Ok(ecx
.tcx()
.generator_hidden_types(def_id)
.map(|bty| {
ecx.instantiate_binder_with_placeholders(replace_erased_lifetimes_with_bound_vars(
ecx.tcx(),
- bty.subst(ecx.tcx(), substs),
+ bty.instantiate(ecx.tcx(), args),
))
})
.collect()),
@@ -226,13 +227,13 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<'tcx>(
) -> Result<Option<ty::Binder<'tcx, (Ty<'tcx>, Ty<'tcx>)>>, NoSolution> {
match *self_ty.kind() {
// keep this in sync with assemble_fn_pointer_candidates until the old solver is removed.
- ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, args) => {
let sig = tcx.fn_sig(def_id);
if sig.skip_binder().is_fn_trait_compatible()
&& tcx.codegen_fn_attrs(def_id).target_features.is_empty()
{
Ok(Some(
- sig.subst(tcx, substs)
+ sig.instantiate(tcx, args)
.map_bound(|sig| (Ty::new_tup(tcx, sig.inputs()), sig.output())),
))
} else {
@@ -247,9 +248,9 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<'tcx>(
Err(NoSolution)
}
}
- ty::Closure(_, substs) => {
- let closure_substs = substs.as_closure();
- match closure_substs.kind_ty().to_opt_closure_kind() {
+ ty::Closure(_, args) => {
+ let closure_args = args.as_closure();
+ match closure_args.kind_ty().to_opt_closure_kind() {
// If the closure's kind doesn't extend the goal kind,
// then the closure doesn't implement the trait.
Some(closure_kind) => {
@@ -265,7 +266,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<'tcx>(
}
}
}
- Ok(Some(closure_substs.sig().map_bound(|sig| (sig.inputs()[0], sig.output()))))
+ Ok(Some(closure_args.sig().map_bound(|sig| (sig.inputs()[0], sig.output()))))
}
ty::Bool
| ty::Char
@@ -343,17 +344,18 @@ pub(in crate::solve) fn predicates_for_object_candidate<'tcx>(
param_env: ty::ParamEnv<'tcx>,
trait_ref: ty::TraitRef<'tcx>,
object_bound: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
-) -> Vec<ty::Clause<'tcx>> {
+) -> Vec<Goal<'tcx, ty::Predicate<'tcx>>> {
let tcx = ecx.tcx();
let mut requirements = vec![];
requirements.extend(
- tcx.super_predicates_of(trait_ref.def_id).instantiate(tcx, trait_ref.substs).predicates,
+ tcx.super_predicates_of(trait_ref.def_id).instantiate(tcx, trait_ref.args).predicates,
);
for item in tcx.associated_items(trait_ref.def_id).in_definition_order() {
// FIXME(associated_const_equality): Also add associated consts to
// the requirements here.
if item.kind == ty::AssocKind::Type {
- requirements.extend(tcx.item_bounds(item.def_id).subst_iter(tcx, trait_ref.substs));
+ requirements
+ .extend(tcx.item_bounds(item.def_id).iter_instantiated(tcx, trait_ref.args));
}
}
@@ -373,17 +375,22 @@ pub(in crate::solve) fn predicates_for_object_candidate<'tcx>(
}
}
- requirements.fold_with(&mut ReplaceProjectionWith {
- ecx,
- param_env,
- mapping: replace_projection_with,
- })
+ let mut folder =
+ ReplaceProjectionWith { ecx, param_env, mapping: replace_projection_with, nested: vec![] };
+ let folded_requirements = requirements.fold_with(&mut folder);
+
+ folder
+ .nested
+ .into_iter()
+ .chain(folded_requirements.into_iter().map(|clause| Goal::new(tcx, param_env, clause)))
+ .collect()
}
struct ReplaceProjectionWith<'a, 'tcx> {
ecx: &'a EvalCtxt<'a, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
mapping: FxHashMap<DefId, ty::PolyProjectionPredicate<'tcx>>,
+ nested: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
}
impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReplaceProjectionWith<'_, 'tcx> {
@@ -399,13 +406,12 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReplaceProjectionWith<'_, 'tcx> {
// but the where clauses we instantiated are not. We can solve this by instantiating
// the binder at the usage site.
let proj = self.ecx.instantiate_binder_with_infer(*replacement);
- // FIXME: Technically this folder could be fallible?
- let nested = self
- .ecx
- .eq_and_get_goals(self.param_env, alias_ty, proj.projection_ty)
- .expect("expected to be able to unify goal projection with dyn's projection");
- // FIXME: Technically we could register these too..
- assert!(nested.is_empty(), "did not expect unification to have any nested goals");
+ // FIXME: Technically this equate could be fallible...
+ self.nested.extend(
+ self.ecx
+ .eq_and_get_goals(self.param_env, alias_ty, proj.projection_ty)
+ .expect("expected to be able to unify goal projection with dyn's projection"),
+ );
proj.term.ty().unwrap()
} else {
ty.super_fold_with(self)
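The constituent-type helpers above can be illustrated with a small example (a sketch, not part of this patch). For an ADT, an auto trait such as `Send` holds exactly when it holds for every field type, which is what `instantiate_constituent_tys_for_auto_trait` returns for `ty::Adt`:

    struct Wrapper<T> {
        items: Vec<T>,
        len: usize,
    }

    fn assert_send<T: Send>() {}

    // `Wrapper<T>: Send` reduces to its constituent field types:
    // `Vec<T>: Send` and `usize: Send`, so it holds whenever `T: Send`.
    fn check<T: Send>() {
        assert_send::<Wrapper<T>>();
    }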
diff --git a/compiler/rustc_trait_selection/src/solve/canonicalize.rs b/compiler/rustc_trait_selection/src/solve/canonicalize.rs
index 255620489..a9d182abf 100644
--- a/compiler/rustc_trait_selection/src/solve/canonicalize.rs
+++ b/compiler/rustc_trait_selection/src/solve/canonicalize.rs
@@ -125,9 +125,8 @@ impl<'a, 'tcx> Canonicalizer<'a, 'tcx> {
// - var_infos: [E0, U1, E1, U1, E1, E6, U6], curr_compressed_uv: 1, next_orig_uv: 6
// - var_infos: [E0, U1, E1, U1, E1, E2, U2], curr_compressed_uv: 2, next_orig_uv: -
//
- // This algorithm runs in `O(nm)` where `n` is the number of different universe
- // indices in the input and `m` is the number of canonical variables.
- // This should be fine as both `n` and `m` are expected to be small.
+ // This algorithm runs in `O(n²)` where `n` is the number of different universe
+ // indices in the input. This should be fine as `n` is expected to be small.
let mut curr_compressed_uv = ty::UniverseIndex::ROOT;
let mut existential_in_new_uv = false;
let mut next_orig_uv = Some(ty::UniverseIndex::ROOT);
@@ -208,23 +207,18 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> {
t
}
- fn fold_region(&mut self, mut r: ty::Region<'tcx>) -> ty::Region<'tcx> {
- match self.canonicalize_mode {
- CanonicalizeMode::Input => {
- // Don't resolve infer vars in input, since it affects
- // caching and may cause trait selection bugs which rely
- // on regions to be equal.
- }
- CanonicalizeMode::Response { .. } => {
- if let ty::ReVar(vid) = *r {
- r = self
- .infcx
- .inner
- .borrow_mut()
- .unwrap_region_constraints()
- .opportunistic_resolve_var(self.infcx.tcx, vid);
- }
- }
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ if let ty::ReVar(vid) = *r {
+ let resolved_region = self
+ .infcx
+ .inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .opportunistic_resolve_var(self.infcx.tcx, vid);
+ assert_eq!(
+ r, resolved_region,
+ "region var should have been resolved, {r} -> {resolved_region}"
+ );
}
let kind = match *r {
@@ -263,50 +257,38 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> {
ty::ReError(_) => return r,
};
- let var = ty::BoundVar::from(
- self.variables.iter().position(|&v| v == r.into()).unwrap_or_else(|| {
- let var = self.variables.len();
- self.variables.push(r.into());
- self.primitive_var_infos.push(CanonicalVarInfo { kind });
- var
- }),
- );
+ let existing_bound_var = match self.canonicalize_mode {
+ CanonicalizeMode::Input => None,
+ CanonicalizeMode::Response { .. } => {
+ self.variables.iter().position(|&v| v == r.into()).map(ty::BoundVar::from)
+ }
+ };
+ let var = existing_bound_var.unwrap_or_else(|| {
+ let var = ty::BoundVar::from(self.variables.len());
+ self.variables.push(r.into());
+ self.primitive_var_infos.push(CanonicalVarInfo { kind });
+ var
+ });
let br = ty::BoundRegion { var, kind: BrAnon(None) };
ty::Region::new_late_bound(self.interner(), self.binder_index, br)
}
- fn fold_ty(&mut self, mut t: Ty<'tcx>) -> Ty<'tcx> {
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
let kind = match *t.kind() {
- ty::Infer(ty::TyVar(mut vid)) => {
- // We need to canonicalize the *root* of our ty var.
- // This is so that our canonical response correctly reflects
- // any equated inference vars correctly!
- let root_vid = self.infcx.root_var(vid);
- if root_vid != vid {
- t = Ty::new_var(self.infcx.tcx, root_vid);
- vid = root_vid;
- }
-
- match self.infcx.probe_ty_var(vid) {
- Ok(t) => return self.fold_ty(t),
- Err(ui) => CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)),
- }
+ ty::Infer(ty::TyVar(vid)) => {
+ assert_eq!(self.infcx.root_var(vid), vid, "ty vid should have been resolved");
+ let Err(ui) = self.infcx.probe_ty_var(vid) else {
+ bug!("ty var should have been resolved: {t}");
+ };
+ CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui))
}
ty::Infer(ty::IntVar(vid)) => {
- let nt = self.infcx.opportunistic_resolve_int_var(vid);
- if nt != t {
- return self.fold_ty(nt);
- } else {
- CanonicalVarKind::Ty(CanonicalTyVarKind::Int)
- }
+ assert_eq!(self.infcx.opportunistic_resolve_int_var(vid), t);
+ CanonicalVarKind::Ty(CanonicalTyVarKind::Int)
}
ty::Infer(ty::FloatVar(vid)) => {
- let nt = self.infcx.opportunistic_resolve_float_var(vid);
- if nt != t {
- return self.fold_ty(nt);
- } else {
- CanonicalVarKind::Ty(CanonicalTyVarKind::Float)
- }
+ assert_eq!(self.infcx.opportunistic_resolve_float_var(vid), t);
+ CanonicalVarKind::Ty(CanonicalTyVarKind::Float)
}
ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("fresh var during canonicalization: {t:?}")
@@ -369,22 +351,19 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> {
Ty::new_bound(self.infcx.tcx, self.binder_index, bt)
}
- fn fold_const(&mut self, mut c: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
let kind = match c.kind() {
- ty::ConstKind::Infer(ty::InferConst::Var(mut vid)) => {
- // We need to canonicalize the *root* of our const var.
- // This is so that our canonical response correctly reflects
- // any equated inference vars correctly!
- let root_vid = self.infcx.root_const_var(vid);
- if root_vid != vid {
- c = ty::Const::new_var(self.infcx.tcx, root_vid, c.ty());
- vid = root_vid;
- }
-
- match self.infcx.probe_const_var(vid) {
- Ok(c) => return self.fold_const(c),
- Err(universe) => CanonicalVarKind::Const(universe, c.ty()),
- }
+ ty::ConstKind::Infer(ty::InferConst::Var(vid)) => {
+ assert_eq!(
+ self.infcx.root_const_var(vid),
+ vid,
+ "const var should have been resolved"
+ );
+ let Err(ui) = self.infcx.probe_const_var(vid) else {
+ bug!("const var should have been resolved");
+ };
+ // FIXME: we should fold this ty eventually
+ CanonicalVarKind::Const(ui, c.ty())
}
ty::ConstKind::Infer(ty::InferConst::Fresh(_)) => {
bug!("fresh var during canonicalization: {c:?}")
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs
index 74dfbdddb..5c2cbe399 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs
@@ -1,20 +1,21 @@
+use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_infer::infer::at::ToTrace;
use rustc_infer::infer::canonical::CanonicalVarValues;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::infer::{
- DefineOpaqueTypes, InferCtxt, InferOk, LateBoundRegionConversionTime, RegionVariableOrigin,
- TyCtxtInferExt,
+ DefineOpaqueTypes, InferCtxt, InferOk, LateBoundRegionConversionTime, TyCtxtInferExt,
};
use rustc_infer::traits::query::NoSolution;
use rustc_infer::traits::ObligationCause;
+use rustc_middle::infer::canonical::CanonicalVarInfos;
use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
use rustc_middle::traits::solve::inspect;
use rustc_middle::traits::solve::{
- CanonicalInput, CanonicalResponse, Certainty, IsNormalizesToHack, MaybeCause,
- PredefinedOpaques, PredefinedOpaquesData, QueryResult,
+ CanonicalInput, CanonicalResponse, Certainty, IsNormalizesToHack, PredefinedOpaques,
+ PredefinedOpaquesData, QueryResult,
};
-use rustc_middle::traits::DefiningAnchor;
+use rustc_middle::traits::{specialization_graph, DefiningAnchor};
use rustc_middle::ty::{
self, OpaqueTypeKey, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable, TypeVisitable,
TypeVisitableExt, TypeVisitor,
@@ -24,10 +25,10 @@ use rustc_span::DUMMY_SP;
use std::io::Write;
use std::ops::ControlFlow;
-use crate::traits::specialization_graph;
+use crate::traits::vtable::{count_own_vtable_entries, prepare_vtable_segments, VtblSegment};
use super::inspect::ProofTreeBuilder;
-use super::search_graph::{self, OverflowHandler};
+use super::search_graph;
use super::SolverMode;
use super::{search_graph::SearchGraph, Goal};
pub use select::InferCtxtSelectExt;
@@ -54,6 +55,9 @@ pub struct EvalCtxt<'a, 'tcx> {
/// the job already.
infcx: &'a InferCtxt<'tcx>,
+ /// The variable info for the `var_values`, only used to make an ambiguous response
+ /// with no constraints.
+ variables: CanonicalVarInfos<'tcx>,
pub(super) var_values: CanonicalVarValues<'tcx>,
predefined_opaques_in_body: PredefinedOpaques<'tcx>,
@@ -116,7 +120,8 @@ impl NestedGoals<'_> {
#[derive(PartialEq, Eq, Debug, Hash, HashStable, Clone, Copy)]
pub enum GenerateProofTree {
Yes(UseGlobalCache),
- No,
+ IfEnabled,
+ Never,
}
#[derive(PartialEq, Eq, Debug, Hash, HashStable, Clone, Copy)]
@@ -169,6 +174,10 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
self.search_graph.solver_mode()
}
+ pub(super) fn local_overflow_limit(&self) -> usize {
+ self.search_graph.local_overflow_limit()
+ }
+
/// Creates a root evaluation context and search graph. This should only be
/// used from outside of any evaluation, and other methods should be preferred
/// over using this manually (such as [`InferCtxtEvalExt::evaluate_root_goal`]).
@@ -182,18 +191,19 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
let mut ecx = EvalCtxt {
search_graph: &mut search_graph,
- infcx: infcx,
+ infcx,
+ nested_goals: NestedGoals::new(),
+ inspect: ProofTreeBuilder::new_maybe_root(infcx.tcx, generate_proof_tree),
+
// Only relevant when canonicalizing the response,
// which we don't do within this evaluation context.
predefined_opaques_in_body: infcx
.tcx
.mk_predefined_opaques_in_body(PredefinedOpaquesData::default()),
- // Only relevant when canonicalizing the response.
max_input_universe: ty::UniverseIndex::ROOT,
+ variables: ty::List::empty(),
var_values: CanonicalVarValues::dummy(),
- nested_goals: NestedGoals::new(),
tainted: Ok(()),
- inspect: ProofTreeBuilder::new_maybe_root(infcx.tcx, generate_proof_tree),
};
let result = f(&mut ecx);
@@ -202,7 +212,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
(&tree, infcx.tcx.sess.opts.unstable_opts.dump_solver_proof_tree)
{
let mut lock = std::io::stdout().lock();
- let _ = lock.write_fmt(format_args!("{tree:?}"));
+ let _ = lock.write_fmt(format_args!("{tree:?}\n"));
let _ = lock.flush();
}
@@ -243,6 +253,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
let mut ecx = EvalCtxt {
infcx,
+ variables: canonical_input.variables,
var_values,
predefined_opaques_in_body: input.predefined_opaques_in_body,
max_input_universe: canonical_input.max_universe,
@@ -270,6 +281,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
// assertions against dropping an `InferCtxt` without taking opaques.
// FIXME: Once we remove support for the old impl we can remove this.
if input.anchor != DefiningAnchor::Error {
+ // This seems ok, but fragile.
let _ = infcx.take_opaque_types();
}
@@ -297,24 +309,26 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
// Deal with overflow, caching, and coinduction.
//
// The actual solver logic happens in `ecx.compute_goal`.
- search_graph.with_new_goal(
- tcx,
- canonical_input,
- goal_evaluation,
- |search_graph, goal_evaluation| {
- EvalCtxt::enter_canonical(
- tcx,
- search_graph,
- canonical_input,
- goal_evaluation,
- |ecx, goal| {
- let result = ecx.compute_goal(goal);
- ecx.inspect.query_result(result);
- result
- },
- )
- },
- )
+ ensure_sufficient_stack(|| {
+ search_graph.with_new_goal(
+ tcx,
+ canonical_input,
+ goal_evaluation,
+ |search_graph, goal_evaluation| {
+ EvalCtxt::enter_canonical(
+ tcx,
+ search_graph,
+ canonical_input,
+ goal_evaluation,
+ |ecx, goal| {
+ let result = ecx.compute_goal(goal);
+ ecx.inspect.query_result(result);
+ result
+ },
+ )
+ },
+ )
+ })
}
/// Recursively evaluates `goal`, returning whether any inference vars have
@@ -326,6 +340,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
) -> Result<(bool, Certainty, Vec<Goal<'tcx, ty::Predicate<'tcx>>>), NoSolution> {
let (orig_values, canonical_goal) = self.canonicalize_goal(goal);
let mut goal_evaluation = self.inspect.new_goal_evaluation(goal, is_normalizes_to_hack);
+ let encountered_overflow = self.search_graph.encountered_overflow();
let canonical_response = EvalCtxt::evaluate_canonical_goal(
self.tcx(),
self.search_graph,
@@ -341,7 +356,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
Ok(response) => response,
};
- let has_changed = !canonical_response.value.var_values.is_identity()
+ let has_changed = !canonical_response.value.var_values.is_identity_modulo_regions()
|| !canonical_response.value.external_constraints.opaque_types.is_empty();
let (certainty, nested_goals) = match self.instantiate_and_apply_query_response(
goal.param_env,
@@ -373,37 +388,60 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
&& is_normalizes_to_hack == IsNormalizesToHack::No
&& !self.search_graph.in_cycle()
{
- debug!("rerunning goal to check result is stable");
- let (_orig_values, canonical_goal) = self.canonicalize_goal(goal);
- let new_canonical_response = EvalCtxt::evaluate_canonical_goal(
- self.tcx(),
- self.search_graph,
- canonical_goal,
- // FIXME(-Ztrait-solver=next): we do not track what happens in `evaluate_canonical_goal`
- &mut ProofTreeBuilder::new_noop(),
- )?;
- // We only check for modulo regions as we convert all regions in
- // the input to new existentials, even if they're expected to be
- // `'static` or a placeholder region.
- if !new_canonical_response.value.var_values.is_identity_modulo_regions() {
- bug!(
- "unstable result: re-canonicalized goal={canonical_goal:#?} \
- first_response={canonical_response:#?} \
- second_response={new_canonical_response:#?}"
- );
- }
- if certainty != new_canonical_response.value.certainty {
- bug!(
- "unstable certainty: {certainty:#?} re-canonicalized goal={canonical_goal:#?} \
- first_response={canonical_response:#?} \
- second_response={new_canonical_response:#?}"
- );
- }
+ // The nested evaluation has to happen with the original state
+ // of `encountered_overflow`.
+ let from_original_evaluation =
+ self.search_graph.reset_encountered_overflow(encountered_overflow);
+ self.check_evaluate_goal_stable_result(goal, canonical_goal, canonical_response);
+ // In case the evaluation was unstable, we manually make sure that this
+ // debug check does not influence the result of the parent goal.
+ self.search_graph.reset_encountered_overflow(from_original_evaluation);
}
Ok((has_changed, certainty, nested_goals))
}
+ fn check_evaluate_goal_stable_result(
+ &mut self,
+ goal: Goal<'tcx, ty::Predicate<'tcx>>,
+ original_input: CanonicalInput<'tcx>,
+ original_result: CanonicalResponse<'tcx>,
+ ) {
+ let (_orig_values, canonical_goal) = self.canonicalize_goal(goal);
+ let result = EvalCtxt::evaluate_canonical_goal(
+ self.tcx(),
+ self.search_graph,
+ canonical_goal,
+ // FIXME(-Ztrait-solver=next): we do not track what happens in `evaluate_canonical_goal`
+ &mut ProofTreeBuilder::new_noop(),
+ );
+
+ macro_rules! fail {
+ ($msg:expr) => {{
+ let msg = $msg;
+ warn!(
+ "unstable result: {msg}\n\
+ original goal: {original_input:?},\n\
+ original result: {original_result:?}\n\
+ re-canonicalized goal: {canonical_goal:?}\n\
+ second response: {result:?}"
+ );
+ return;
+ }};
+ }
+
+ let Ok(new_canonical_response) = result else { fail!("second response was error") };
+ // We only check for modulo regions as we convert all regions in
+ // the input to new existentials, even if they're expected to be
+ // `'static` or a placeholder region.
+ if !new_canonical_response.value.var_values.is_identity_modulo_regions() {
+ fail!("additional constraints from second response")
+ }
+ if original_result.value.certainty != new_canonical_response.value.certainty {
+ fail!("unstable certainty")
+ }
+ }
+
fn compute_goal(&mut self, goal: Goal<'tcx, ty::Predicate<'tcx>>) -> QueryResult<'tcx> {
let Goal { param_env, predicate } = goal;
let kind = predicate.kind();
@@ -430,11 +468,8 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
ty::PredicateKind::Coerce(predicate) => {
self.compute_coerce_goal(Goal { param_env, predicate })
}
- ty::PredicateKind::ClosureKind(def_id, substs, kind) => self
- .compute_closure_kind_goal(Goal {
- param_env,
- predicate: (def_id, substs, kind),
- }),
+ ty::PredicateKind::ClosureKind(def_id, args, kind) => self
+ .compute_closure_kind_goal(Goal { param_env, predicate: (def_id, args, kind) }),
ty::PredicateKind::ObjectSafe(trait_def_id) => {
self.compute_object_safe_goal(trait_def_id)
}
@@ -471,101 +506,22 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
let inspect = self.inspect.new_evaluate_added_goals();
let inspect = core::mem::replace(&mut self.inspect, inspect);
- let mut goals = core::mem::replace(&mut self.nested_goals, NestedGoals::new());
- let mut new_goals = NestedGoals::new();
-
- let response = self.repeat_while_none(
- |_| Ok(Certainty::Maybe(MaybeCause::Overflow)),
- |this| {
- this.inspect.evaluate_added_goals_loop_start();
-
- let mut has_changed = Err(Certainty::Yes);
-
- if let Some(goal) = goals.normalizes_to_hack_goal.take() {
- // Replace the goal with an unconstrained infer var, so the
- // RHS does not affect projection candidate assembly.
- let unconstrained_rhs = this.next_term_infer_of_kind(goal.predicate.term);
- let unconstrained_goal = goal.with(
- this.tcx(),
- ty::Binder::dummy(ty::ProjectionPredicate {
- projection_ty: goal.predicate.projection_ty,
- term: unconstrained_rhs,
- }),
- );
-
- let (_, certainty, instantiate_goals) =
- match this.evaluate_goal(IsNormalizesToHack::Yes, unconstrained_goal) {
- Ok(r) => r,
- Err(NoSolution) => return Some(Err(NoSolution)),
- };
- new_goals.goals.extend(instantiate_goals);
-
- // Finally, equate the goal's RHS with the unconstrained var.
- // We put the nested goals from this into goals instead of
- // next_goals to avoid needing to process the loop one extra
- // time if this goal returns something -- I don't think this
- // matters in practice, though.
- match this.eq_and_get_goals(
- goal.param_env,
- goal.predicate.term,
- unconstrained_rhs,
- ) {
- Ok(eq_goals) => {
- goals.goals.extend(eq_goals);
- }
- Err(NoSolution) => return Some(Err(NoSolution)),
- };
-
- // We only look at the `projection_ty` part here rather than
- // looking at the "has changed" return from evaluate_goal,
- // because we expect the `unconstrained_rhs` part of the predicate
- // to have changed -- that means we actually normalized successfully!
- if goal.predicate.projection_ty
- != this.resolve_vars_if_possible(goal.predicate.projection_ty)
- {
- has_changed = Ok(())
- }
-
- match certainty {
- Certainty::Yes => {}
- Certainty::Maybe(_) => {
- // We need to resolve vars here so that we correctly
- // deal with `has_changed` in the next iteration.
- new_goals.normalizes_to_hack_goal =
- Some(this.resolve_vars_if_possible(goal));
- has_changed = has_changed.map_err(|c| c.unify_with(certainty));
- }
- }
+ let mut response = Ok(Certainty::OVERFLOW);
+ for _ in 0..self.local_overflow_limit() {
+ // FIXME: This match is a bit ugly; it might be nice to change the inspect
+ // stuff to use a closure instead, which should hopefully simplify this a bit.
+ match self.evaluate_added_goals_step() {
+ Ok(Some(cert)) => {
+ response = Ok(cert);
+ break;
}
-
- for goal in goals.goals.drain(..) {
- let (changed, certainty, instantiate_goals) =
- match this.evaluate_goal(IsNormalizesToHack::No, goal) {
- Ok(result) => result,
- Err(NoSolution) => return Some(Err(NoSolution)),
- };
- new_goals.goals.extend(instantiate_goals);
-
- if changed {
- has_changed = Ok(());
- }
-
- match certainty {
- Certainty::Yes => {}
- Certainty::Maybe(_) => {
- new_goals.goals.push(goal);
- has_changed = has_changed.map_err(|c| c.unify_with(certainty));
- }
- }
- }
-
- core::mem::swap(&mut new_goals, &mut goals);
- match has_changed {
- Ok(()) => None,
- Err(certainty) => Some(Ok(certainty)),
+ Ok(None) => {}
+ Err(NoSolution) => {
+ response = Err(NoSolution);
+ break;
}
- },
- );
+ }
+ }
self.inspect.eval_added_goals_result(response);
@@ -576,9 +532,84 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
let goal_evaluations = std::mem::replace(&mut self.inspect, inspect);
self.inspect.added_goals_evaluation(goal_evaluations);
- self.nested_goals = goals;
response
}
+
+ /// Iterate over all added goals, returning `Ok(Some(_))` once we can stop rerunning.
+ ///
+ /// Goals for the next step get added directly to the nested goals of the `EvalCtxt`.
+ fn evaluate_added_goals_step(&mut self) -> Result<Option<Certainty>, NoSolution> {
+ let tcx = self.tcx();
+ let mut goals = core::mem::replace(&mut self.nested_goals, NestedGoals::new());
+
+ self.inspect.evaluate_added_goals_loop_start();
+ // If this loop did not result in any progress, this is our final certainty.
+ let mut unchanged_certainty = Some(Certainty::Yes);
+ if let Some(goal) = goals.normalizes_to_hack_goal.take() {
+ // Replace the goal with an unconstrained infer var, so the
+ // RHS does not affect projection candidate assembly.
+ let unconstrained_rhs = self.next_term_infer_of_kind(goal.predicate.term);
+ let unconstrained_goal = goal.with(
+ tcx,
+ ty::ProjectionPredicate {
+ projection_ty: goal.predicate.projection_ty,
+ term: unconstrained_rhs,
+ },
+ );
+
+ let (_, certainty, instantiate_goals) =
+ self.evaluate_goal(IsNormalizesToHack::Yes, unconstrained_goal)?;
+ self.add_goals(instantiate_goals);
+
+ // Finally, equate the goal's RHS with the unconstrained var.
+ // We put the nested goals from this into goals instead of
+ // next_goals to avoid needing to process the loop one extra
+ // time if this goal returns something -- I don't think this
+ // matters in practice, though.
+ let eq_goals =
+ self.eq_and_get_goals(goal.param_env, goal.predicate.term, unconstrained_rhs)?;
+ goals.goals.extend(eq_goals);
+
+ // We only look at the `projection_ty` part here rather than
+ // looking at the "has changed" return from evaluate_goal,
+ // because we expect the `unconstrained_rhs` part of the predicate
+ // to have changed -- that means we actually normalized successfully!
+ if goal.predicate.projection_ty
+ != self.resolve_vars_if_possible(goal.predicate.projection_ty)
+ {
+ unchanged_certainty = None;
+ }
+
+ match certainty {
+ Certainty::Yes => {}
+ Certainty::Maybe(_) => {
+ // We need to resolve vars here so that we correctly
+ // deal with `has_changed` in the next iteration.
+ self.set_normalizes_to_hack_goal(self.resolve_vars_if_possible(goal));
+ unchanged_certainty = unchanged_certainty.map(|c| c.unify_with(certainty));
+ }
+ }
+ }
+
+ for goal in goals.goals.drain(..) {
+ let (has_changed, certainty, instantiate_goals) =
+ self.evaluate_goal(IsNormalizesToHack::No, goal)?;
+ self.add_goals(instantiate_goals);
+ if has_changed {
+ unchanged_certainty = None;
+ }
+
+ match certainty {
+ Certainty::Yes => {}
+ Certainty::Maybe(_) => {
+ self.add_goal(goal);
+ unchanged_certainty = unchanged_certainty.map(|c| c.unify_with(certainty));
+ }
+ }
+ }
+
+ Ok(unchanged_certainty)
+ }
}
impl<'tcx> EvalCtxt<'_, 'tcx> {
@@ -593,10 +624,6 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
})
}
- pub(super) fn next_region_infer(&self) -> ty::Region<'tcx> {
- self.infcx.next_region_var(RegionVariableOrigin::MiscVariable(DUMMY_SP))
- }
-
pub(super) fn next_const_infer(&self, ty: Ty<'tcx>) -> ty::Const<'tcx> {
self.infcx.next_const_var(
ty,
@@ -774,24 +801,18 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
self.infcx.resolve_vars_if_possible(value)
}
- pub(super) fn fresh_substs_for_item(&self, def_id: DefId) -> ty::SubstsRef<'tcx> {
- self.infcx.fresh_substs_for_item(DUMMY_SP, def_id)
+ pub(super) fn fresh_args_for_item(&self, def_id: DefId) -> ty::GenericArgsRef<'tcx> {
+ self.infcx.fresh_args_for_item(DUMMY_SP, def_id)
}
- pub(super) fn translate_substs(
+ pub(super) fn translate_args(
&self,
param_env: ty::ParamEnv<'tcx>,
source_impl: DefId,
- source_substs: ty::SubstsRef<'tcx>,
+ source_args: ty::GenericArgsRef<'tcx>,
target_node: specialization_graph::Node,
- ) -> ty::SubstsRef<'tcx> {
- crate::traits::translate_substs(
- self.infcx,
- param_env,
- source_impl,
- source_substs,
- target_node,
- )
+ ) -> ty::GenericArgsRef<'tcx> {
+ crate::traits::translate_args(self.infcx, param_env, source_impl, source_args, target_node)
}
pub(super) fn register_ty_outlives(&self, ty: Ty<'tcx>, lt: ty::Region<'tcx>) {
@@ -863,14 +884,14 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
pub(super) fn add_item_bounds_for_hidden_type(
&mut self,
opaque_def_id: DefId,
- opaque_substs: ty::SubstsRef<'tcx>,
+ opaque_args: ty::GenericArgsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
hidden_ty: Ty<'tcx>,
) {
let mut obligations = Vec::new();
self.infcx.add_item_bounds_for_hidden_type(
opaque_def_id,
- opaque_substs,
+ opaque_args,
ObligationCause::dummy(),
param_env,
hidden_ty,
@@ -896,13 +917,13 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
continue;
}
values.extend(self.probe_candidate("opaque type storage").enter(|ecx| {
- for (a, b) in std::iter::zip(candidate_key.substs, key.substs) {
+ for (a, b) in std::iter::zip(candidate_key.args, key.args) {
ecx.eq(param_env, a, b)?;
}
ecx.eq(param_env, candidate_ty, ty)?;
ecx.add_item_bounds_for_hidden_type(
candidate_key.def_id.to_def_id(),
- candidate_key.substs,
+ candidate_key.args,
param_env,
candidate_ty,
);
@@ -928,4 +949,39 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
Err(ErrorHandled::TooGeneric) => None,
}
}
+
+ /// Walk through the vtable of a principal trait ref, executing a `supertrait_visitor`
+ /// for every trait ref encountered (including the principal). Passes both the vtable
+ /// base and the (optional) vptr slot.
+ pub(super) fn walk_vtable(
+ &mut self,
+ principal: ty::PolyTraitRef<'tcx>,
+ mut supertrait_visitor: impl FnMut(&mut Self, ty::PolyTraitRef<'tcx>, usize, Option<usize>),
+ ) {
+ let tcx = self.tcx();
+ let mut offset = 0;
+ prepare_vtable_segments::<()>(tcx, principal, |segment| {
+ match segment {
+ VtblSegment::MetadataDSA => {
+ offset += TyCtxt::COMMON_VTABLE_ENTRIES.len();
+ }
+ VtblSegment::TraitOwnEntries { trait_ref, emit_vptr } => {
+ let own_vtable_entries = count_own_vtable_entries(tcx, trait_ref);
+
+ supertrait_visitor(
+ self,
+ trait_ref,
+ offset,
+ emit_vptr.then(|| offset + own_vtable_entries),
+ );
+
+ offset += own_vtable_entries;
+ if emit_vptr {
+ offset += 1;
+ }
+ }
+ }
+ ControlFlow::Continue(())
+ });
+ }
}
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs
index 637d45888..523841951 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs
@@ -1,26 +1,30 @@
-/// Canonicalization is used to separate some goal from its context,
-/// throwing away unnecessary information in the process.
-///
-/// This is necessary to cache goals containing inference variables
-/// and placeholders without restricting them to the current `InferCtxt`.
-///
-/// Canonicalization is fairly involved, for more details see the relevant
-/// section of the [rustc-dev-guide][c].
-///
-/// [c]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html
+//! Canonicalization is used to separate some goal from its context,
+//! throwing away unnecessary information in the process.
+//!
+//! This is necessary to cache goals containing inference variables
+//! and placeholders without restricting them to the current `InferCtxt`.
+//!
+//! Canonicalization is fairly involved, for more details see the relevant
+//! section of the [rustc-dev-guide][c].
+//!
+//! [c]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html
use super::{CanonicalInput, Certainty, EvalCtxt, Goal};
use crate::solve::canonicalize::{CanonicalizeMode, Canonicalizer};
-use crate::solve::{CanonicalResponse, QueryResult, Response};
+use crate::solve::{response_no_constraints_raw, CanonicalResponse, QueryResult, Response};
use rustc_data_structures::fx::FxHashSet;
use rustc_index::IndexVec;
use rustc_infer::infer::canonical::query_response::make_query_region_constraints;
use rustc_infer::infer::canonical::CanonicalVarValues;
use rustc_infer::infer::canonical::{CanonicalExt, QueryRegionConstraints};
+use rustc_infer::infer::InferCtxt;
use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::solve::{
- ExternalConstraints, ExternalConstraintsData, MaybeCause, PredefinedOpaquesData, QueryInput,
+ ExternalConstraintsData, MaybeCause, PredefinedOpaquesData, QueryInput,
+};
+use rustc_middle::ty::{
+ self, BoundVar, GenericArgKind, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
+ TypeVisitableExt,
};
-use rustc_middle::ty::{self, BoundVar, GenericArgKind, Ty, TyCtxt, TypeFoldable};
use rustc_span::DUMMY_SP;
use std::iter;
use std::ops::Deref;
@@ -32,6 +36,10 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
&self,
goal: Goal<'tcx, T>,
) -> (Vec<ty::GenericArg<'tcx>>, CanonicalInput<'tcx, T>) {
+ let opaque_types = self.infcx.clone_opaque_types_for_query_response();
+ let (goal, opaque_types) =
+ (goal, opaque_types).fold_with(&mut EagerResolver { infcx: self.infcx });
+
let mut orig_values = Default::default();
let canonical_goal = Canonicalizer::canonicalize(
self.infcx,
@@ -40,11 +48,9 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
QueryInput {
goal,
anchor: self.infcx.defining_use_anchor,
- predefined_opaques_in_body: self.tcx().mk_predefined_opaques_in_body(
- PredefinedOpaquesData {
- opaque_types: self.infcx.clone_opaque_types_for_query_response(),
- },
- ),
+ predefined_opaques_in_body: self
+ .tcx()
+ .mk_predefined_opaques_in_body(PredefinedOpaquesData { opaque_types }),
},
);
(orig_values, canonical_goal)
@@ -70,34 +76,43 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
);
let certainty = certainty.unify_with(goals_certainty);
+ if let Certainty::OVERFLOW = certainty {
+ // If we have overflow, it's probable that we're substituting a type
+ // into itself infinitely and any partial substitutions in the query
+ // response are probably not useful anyways, so just return an empty
+ // query response.
+ //
+ // This may prevent us from potentially useful inference, e.g.
+ // 2 candidates, one ambiguous and one overflow, which both
+ // have the same inference constraints.
+ //
+ // Changing this to retain some constraints in the future
+ // won't be a breaking change, so this is good enough for now.
+ return Ok(self.make_ambiguous_response_no_constraints(MaybeCause::Overflow));
+ }
- let response = match certainty {
- Certainty::Yes | Certainty::Maybe(MaybeCause::Ambiguity) => {
- let external_constraints = self.compute_external_query_constraints()?;
- Response { var_values: self.var_values, external_constraints, certainty }
- }
- Certainty::Maybe(MaybeCause::Overflow) => {
- // If we have overflow, it's probable that we're substituting a type
- // into itself infinitely and any partial substitutions in the query
- // response are probably not useful anyways, so just return an empty
- // query response.
- //
- // This may prevent us from potentially useful inference, e.g.
- // 2 candidates, one ambiguous and one overflow, which both
- // have the same inference constraints.
- //
- // Changing this to retain some constraints in the future
- // won't be a breaking change, so this is good enough for now.
- return Ok(self.make_ambiguous_response_no_constraints(MaybeCause::Overflow));
- }
- };
+ let var_values = self.var_values;
+ let external_constraints = self.compute_external_query_constraints()?;
+
+ let (var_values, mut external_constraints) =
+ (var_values, external_constraints).fold_with(&mut EagerResolver { infcx: self.infcx });
+ // Remove any trivial region constraints once we've resolved regions
+ external_constraints
+ .region_constraints
+ .outlives
+ .retain(|(outlives, _)| outlives.0.as_region().map_or(true, |re| re != outlives.1));
let canonical = Canonicalizer::canonicalize(
self.infcx,
CanonicalizeMode::Response { max_input_universe: self.max_input_universe },
&mut Default::default(),
- response,
+ Response {
+ var_values,
+ certainty,
+ external_constraints: self.tcx().mk_external_constraints(external_constraints),
+ },
);
+
Ok(canonical)
}
@@ -109,34 +124,25 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
&self,
maybe_cause: MaybeCause,
) -> CanonicalResponse<'tcx> {
- let unconstrained_response = Response {
- var_values: CanonicalVarValues {
- var_values: self.tcx().mk_substs_from_iter(self.var_values.var_values.iter().map(
- |arg| -> ty::GenericArg<'tcx> {
- match arg.unpack() {
- GenericArgKind::Lifetime(_) => self.next_region_infer().into(),
- GenericArgKind::Type(_) => self.next_ty_infer().into(),
- GenericArgKind::Const(ct) => self.next_const_infer(ct.ty()).into(),
- }
- },
- )),
- },
- external_constraints: self
- .tcx()
- .mk_external_constraints(ExternalConstraintsData::default()),
- certainty: Certainty::Maybe(maybe_cause),
- };
-
- Canonicalizer::canonicalize(
- self.infcx,
- CanonicalizeMode::Response { max_input_universe: self.max_input_universe },
- &mut Default::default(),
- unconstrained_response,
+ response_no_constraints_raw(
+ self.tcx(),
+ self.max_input_universe,
+ self.variables,
+ Certainty::Maybe(maybe_cause),
)
}
+ /// Computes the region constraints and *new* opaque types registered when
+ /// proving a goal.
+ ///
+ /// If an opaque was already constrained before proving this goal, then the
+ /// external constraints do not need to record that opaque, since if it is
+ /// further constrained by inference, that will be passed back in the var
+ /// values.
#[instrument(level = "debug", skip(self), ret)]
- fn compute_external_query_constraints(&self) -> Result<ExternalConstraints<'tcx>, NoSolution> {
+ fn compute_external_query_constraints(
+ &self,
+ ) -> Result<ExternalConstraintsData<'tcx>, NoSolution> {
// We only check for leaks from universes which were entered inside
// of the query.
self.infcx.leak_check(self.max_input_universe, None).map_err(|e| {
@@ -166,9 +172,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
self.predefined_opaques_in_body.opaque_types.iter().all(|(pa, _)| pa != a)
});
- Ok(self
- .tcx()
- .mk_external_constraints(ExternalConstraintsData { region_constraints, opaque_types }))
+ Ok(ExternalConstraintsData { region_constraints, opaque_types })
}
/// After calling a canonical query, we apply the constraints returned
@@ -211,7 +215,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
// created inside of the query directly instead of returning them to the
// caller.
let prev_universe = self.infcx.universe();
- let universes_created_in_query = response.max_universe.index() + 1;
+ let universes_created_in_query = response.max_universe.index();
for _ in 0..universes_created_in_query {
self.infcx.create_next_universe();
}
@@ -250,7 +254,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
}
}
- let var_values = self.tcx().mk_substs_from_iter(response.variables.iter().enumerate().map(
+ let var_values = self.tcx().mk_args_from_iter(response.variables.iter().enumerate().map(
|(index, info)| {
if info.universe() != ty::UniverseIndex::ROOT {
// A variable from inside a binder of the query. While ideally these shouldn't
@@ -326,3 +330,65 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
Ok(())
}
}
+
+/// Resolves ty, region, and const vars to their inferred values or their root vars.
+struct EagerResolver<'a, 'tcx> {
+ infcx: &'a InferCtxt<'tcx>,
+}
+
+impl<'tcx> TypeFolder<TyCtxt<'tcx>> for EagerResolver<'_, 'tcx> {
+ fn interner(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match *t.kind() {
+ ty::Infer(ty::TyVar(vid)) => match self.infcx.probe_ty_var(vid) {
+ Ok(t) => t.fold_with(self),
+ Err(_) => Ty::new_var(self.infcx.tcx, self.infcx.root_var(vid)),
+ },
+ ty::Infer(ty::IntVar(vid)) => self.infcx.opportunistic_resolve_int_var(vid),
+ ty::Infer(ty::FloatVar(vid)) => self.infcx.opportunistic_resolve_float_var(vid),
+ _ => {
+ if t.has_infer() {
+ t.super_fold_with(self)
+ } else {
+ t
+ }
+ }
+ }
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReVar(vid) => self
+ .infcx
+ .inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .opportunistic_resolve_var(self.infcx.tcx, vid),
+ _ => r,
+ }
+ }
+
+ fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ match c.kind() {
+ ty::ConstKind::Infer(ty::InferConst::Var(vid)) => {
+ // FIXME: we need to fold the ty too, I think.
+ match self.infcx.probe_const_var(vid) {
+ Ok(c) => c.fold_with(self),
+ Err(_) => {
+ ty::Const::new_var(self.infcx.tcx, self.infcx.root_const_var(vid), c.ty())
+ }
+ }
+ }
+ _ => {
+ if c.has_infer() {
+ c.super_fold_with(self)
+ } else {
+ c
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs
index 4477ea7d5..317c43baf 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs
@@ -17,6 +17,7 @@ where
let mut nested_ecx = EvalCtxt {
infcx: outer_ecx.infcx,
+ variables: outer_ecx.variables,
var_values: outer_ecx.var_values,
predefined_opaques_in_body: outer_ecx.predefined_opaques_in_body,
max_input_universe: outer_ecx.max_input_universe,
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs
index bf6cbef8c..42d7a587c 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs
@@ -1,24 +1,21 @@
-use std::ops::ControlFlow;
-
+use rustc_hir as hir;
use rustc_hir::def_id::DefId;
-use rustc_infer::infer::{DefineOpaqueTypes, InferCtxt, InferOk};
-use rustc_infer::traits::util::supertraits;
+use rustc_infer::infer::{DefineOpaqueTypes, InferCtxt};
use rustc_infer::traits::{
- Obligation, PolyTraitObligation, PredicateObligation, Selection, SelectionResult,
+ Obligation, PolyTraitObligation, PredicateObligation, Selection, SelectionResult, TraitEngine,
};
use rustc_middle::traits::solve::{CanonicalInput, Certainty, Goal};
use rustc_middle::traits::{
- ImplSource, ImplSourceObjectData, ImplSourceTraitUpcastingData, ImplSourceUserDefinedData,
- ObligationCause, SelectionError,
+ BuiltinImplSource, ImplSource, ImplSourceUserDefinedData, ObligationCause, SelectionError,
};
-use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::DUMMY_SP;
-use crate::solve::assembly::{BuiltinImplSource, Candidate, CandidateSource};
+use crate::solve::assembly::{Candidate, CandidateSource};
use crate::solve::eval_ctxt::{EvalCtxt, GenerateProofTree};
use crate::solve::inspect::ProofTreeBuilder;
-use crate::solve::search_graph::OverflowHandler;
-use crate::traits::vtable::{count_own_vtable_entries, prepare_vtable_segments, VtblSegment};
+use crate::traits::StructurallyNormalizeExt;
+use crate::traits::TraitEngineExt;
pub trait InferCtxtSelectExt<'tcx> {
fn select_in_new_trait_solver(
@@ -40,7 +37,7 @@ impl<'tcx> InferCtxtSelectExt<'tcx> for InferCtxt<'tcx> {
self.instantiate_binder_with_placeholders(obligation.predicate),
);
- let (result, _) = EvalCtxt::enter_root(self, GenerateProofTree::No, |ecx| {
+ let (result, _) = EvalCtxt::enter_root(self, GenerateProofTree::Never, |ecx| {
let goal = Goal::new(ecx.tcx(), trait_goal.param_env, trait_goal.predicate);
let (orig_values, canonical_goal) = ecx.canonicalize_goal(goal);
let mut candidates = ecx.compute_canonical_trait_candidates(canonical_goal);
@@ -102,32 +99,34 @@ impl<'tcx> InferCtxtSelectExt<'tcx> for InferCtxt<'tcx> {
rematch_impl(self, goal, def_id, nested_obligations)
}
- // Rematching the dyn upcast or object goal will instantiate the same nested
- // goals that would have caused the ambiguity, so we can still make progress here
- // regardless.
- // FIXME: This doesn't actually check the object bounds hold here.
- (
- _,
- CandidateSource::BuiltinImpl(
- BuiltinImplSource::Object | BuiltinImplSource::TraitUpcasting,
- ),
- ) => rematch_object(self, goal, nested_obligations),
+ // If an unsize goal is ambiguous, then we can manually rematch it to make
+ // selection progress for coercion during HIR typeck. If it is *not* ambiguous,
+ // but is `BuiltinImplSource::Misc`, it may have nested `Unsize` goals,
+ // and we need to rematch those to detect tuple unsizing and trait upcasting.
+ // FIXME: This will be wrong if we have param-env or where-clause bounds
+ // with the unsize goal -- we may need to mark those with different impl
+ // sources.
+ (Certainty::Maybe(_), CandidateSource::BuiltinImpl(src))
+ | (Certainty::Yes, CandidateSource::BuiltinImpl(src @ BuiltinImplSource::Misc))
+ if self.tcx.lang_items().unsize_trait() == Some(goal.predicate.def_id()) =>
+ {
+ rematch_unsize(self, goal, nested_obligations, src, certainty)
+ }
// Technically some builtin impls have nested obligations, but if
// `Certainty::Yes`, then they should've all been verified and don't
// need re-checking.
- (Certainty::Yes, CandidateSource::BuiltinImpl(BuiltinImplSource::Misc)) => {
- Ok(Some(ImplSource::Builtin(nested_obligations)))
+ (Certainty::Yes, CandidateSource::BuiltinImpl(src)) => {
+ Ok(Some(ImplSource::Builtin(src, nested_obligations)))
}
// It's fine not to do anything to rematch these, since there are no
// nested obligations.
(Certainty::Yes, CandidateSource::ParamEnv(_) | CandidateSource::AliasBound) => {
- Ok(Some(ImplSource::Param(nested_obligations, ty::BoundConstness::NotConst)))
+ Ok(Some(ImplSource::Param(nested_obligations)))
}
- (_, CandidateSource::BuiltinImpl(BuiltinImplSource::Ambiguity))
- | (Certainty::Maybe(_), _) => Ok(None),
+ (Certainty::Maybe(_), _) => Ok(None),
}
}
}
@@ -143,7 +142,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
// the cycle anyways one step later.
EvalCtxt::enter_canonical(
self.tcx(),
- self.search_graph(),
+ self.search_graph,
canonical_input,
// FIXME: This is wrong, idk if we even want to track stuff here.
&mut ProofTreeBuilder::new_noop(),
@@ -174,11 +173,12 @@ fn candidate_should_be_dropped_in_favor_of<'tcx>(
}
(_, CandidateSource::ParamEnv(_)) => true,
+ // FIXME: we could prefer earlier vtable bases perhaps...
(
- CandidateSource::BuiltinImpl(BuiltinImplSource::Object),
- CandidateSource::BuiltinImpl(BuiltinImplSource::Object),
+ CandidateSource::BuiltinImpl(BuiltinImplSource::Object { .. }),
+ CandidateSource::BuiltinImpl(BuiltinImplSource::Object { .. }),
) => false,
- (_, CandidateSource::BuiltinImpl(BuiltinImplSource::Object)) => true,
+ (_, CandidateSource::BuiltinImpl(BuiltinImplSource::Object { .. })) => true,
(CandidateSource::Impl(victim_def_id), CandidateSource::Impl(other_def_id)) => {
tcx.specializes((other_def_id, victim_def_id))
@@ -195,8 +195,9 @@ fn rematch_impl<'tcx>(
impl_def_id: DefId,
mut nested: Vec<PredicateObligation<'tcx>>,
) -> SelectionResult<'tcx, Selection<'tcx>> {
- let substs = infcx.fresh_substs_for_item(DUMMY_SP, impl_def_id);
- let impl_trait_ref = infcx.tcx.impl_trait_ref(impl_def_id).unwrap().subst(infcx.tcx, substs);
+ let args = infcx.fresh_args_for_item(DUMMY_SP, impl_def_id);
+ let impl_trait_ref =
+ infcx.tcx.impl_trait_ref(impl_def_id).unwrap().instantiate(infcx.tcx, args);
nested.extend(
infcx
@@ -207,101 +208,191 @@ fn rematch_impl<'tcx>(
);
nested.extend(
- infcx.tcx.predicates_of(impl_def_id).instantiate(infcx.tcx, substs).into_iter().map(
+ infcx.tcx.predicates_of(impl_def_id).instantiate(infcx.tcx, args).into_iter().map(
|(pred, _)| Obligation::new(infcx.tcx, ObligationCause::dummy(), goal.param_env, pred),
),
);
- Ok(Some(ImplSource::UserDefined(ImplSourceUserDefinedData { impl_def_id, substs, nested })))
+ Ok(Some(ImplSource::UserDefined(ImplSourceUserDefinedData { impl_def_id, args, nested })))
}
-fn rematch_object<'tcx>(
+/// The `Unsize` trait is particularly important to coercion, so we try to rematch it.
+/// NOTE: This must stay in sync with `consider_builtin_unsize_candidate` in trait
+/// goal assembly in the solver, both for soundness and in order to avoid ICEs.
+fn rematch_unsize<'tcx>(
infcx: &InferCtxt<'tcx>,
goal: Goal<'tcx, ty::TraitPredicate<'tcx>>,
mut nested: Vec<PredicateObligation<'tcx>>,
+ source: BuiltinImplSource,
+ certainty: Certainty,
) -> SelectionResult<'tcx, Selection<'tcx>> {
- let self_ty = goal.predicate.self_ty();
- let ty::Dynamic(data, _, source_kind) = *self_ty.kind()
- else {
- bug!()
- };
- let source_trait_ref = data.principal().unwrap().with_self_ty(infcx.tcx, self_ty);
-
- let (is_upcasting, target_trait_ref_unnormalized) = if Some(goal.predicate.def_id())
- == infcx.tcx.lang_items().unsize_trait()
- {
- assert_eq!(source_kind, ty::Dyn, "cannot upcast dyn*");
- if let ty::Dynamic(data, _, ty::Dyn) = goal.predicate.trait_ref.substs.type_at(1).kind() {
- (true, data.principal().unwrap().with_self_ty(infcx.tcx, self_ty))
- } else {
- bug!()
- }
- } else {
- (false, ty::Binder::dummy(goal.predicate.trait_ref))
- };
-
- let mut target_trait_ref = None;
- for candidate_trait_ref in supertraits(infcx.tcx, source_trait_ref) {
- let result = infcx.commit_if_ok(|_| {
- infcx.at(&ObligationCause::dummy(), goal.param_env).eq(
- DefineOpaqueTypes::No,
- target_trait_ref_unnormalized,
- candidate_trait_ref,
- )
-
- // FIXME: We probably should at least shallowly verify these...
- });
+ let tcx = infcx.tcx;
+ let a_ty = structurally_normalize(goal.predicate.self_ty(), infcx, goal.param_env, &mut nested);
+ let b_ty = structurally_normalize(
+ goal.predicate.trait_ref.args.type_at(1),
+ infcx,
+ goal.param_env,
+ &mut nested,
+ );
- match result {
- Ok(InferOk { value: (), obligations }) => {
- target_trait_ref = Some(candidate_trait_ref);
- nested.extend(obligations);
- break;
+ match (a_ty.kind(), b_ty.kind()) {
+ // Don't try to coerce `?0` to `dyn Trait`
+ (ty::Infer(ty::TyVar(_)), _) | (_, ty::Infer(ty::TyVar(_))) => Ok(None),
+ // Stall any ambiguous upcasting goals, since we can't rematch those
+ (ty::Dynamic(_, _, ty::Dyn), ty::Dynamic(_, _, ty::Dyn)) => match certainty {
+ Certainty::Yes => Ok(Some(ImplSource::Builtin(source, nested))),
+ _ => Ok(None),
+ },
+ // `T` -> `dyn Trait` unsizing
+ (_, &ty::Dynamic(data, region, ty::Dyn)) => {
+ // Check that the type implements all of the trait object's predicates
+ // (i.e. the principal, its associated type bounds, and any auto traits).
+ nested.extend(data.iter().map(|pred| {
+ Obligation::new(
+ infcx.tcx,
+ ObligationCause::dummy(),
+ goal.param_env,
+ pred.with_self_ty(tcx, a_ty),
+ )
+ }));
+ // The type must be Sized to be unsized.
+ let sized_def_id = tcx.require_lang_item(hir::LangItem::Sized, None);
+ nested.push(Obligation::new(
+ infcx.tcx,
+ ObligationCause::dummy(),
+ goal.param_env,
+ ty::TraitRef::new(tcx, sized_def_id, [a_ty]),
+ ));
+ // The type must outlive the lifetime of the `dyn` we're unsizing into.
+ nested.push(Obligation::new(
+ infcx.tcx,
+ ObligationCause::dummy(),
+ goal.param_env,
+ ty::OutlivesPredicate(a_ty, region),
+ ));
+
+ Ok(Some(ImplSource::Builtin(source, nested)))
+ }
+ // `[T; n]` -> `[T]` unsizing
+ (&ty::Array(a_elem_ty, ..), &ty::Slice(b_elem_ty)) => {
+ nested.extend(
+ infcx
+ .at(&ObligationCause::dummy(), goal.param_env)
+ .eq(DefineOpaqueTypes::No, a_elem_ty, b_elem_ty)
+ .expect("expected rematch to succeed")
+ .into_obligations(),
+ );
+
+ Ok(Some(ImplSource::Builtin(source, nested)))
+ }
+ // Struct unsizing `Struct<T>` -> `Struct<U>` where `T: Unsize<U>`
+ (&ty::Adt(a_def, a_args), &ty::Adt(b_def, b_args))
+ if a_def.is_struct() && a_def.did() == b_def.did() =>
+ {
+ let unsizing_params = tcx.unsizing_params_for_adt(a_def.did());
+ // We must be unsizing some type parameters. This also implies
+ // that the struct has a tail field.
+ if unsizing_params.is_empty() {
+ bug!("expected rematch to succeed")
}
- Err(_) => continue,
+
+ let tail_field = a_def
+ .non_enum_variant()
+ .fields
+ .raw
+ .last()
+ .expect("expected unsized ADT to have a tail field");
+ let tail_field_ty = tcx.type_of(tail_field.did);
+
+ let a_tail_ty = tail_field_ty.instantiate(tcx, a_args);
+ let b_tail_ty = tail_field_ty.instantiate(tcx, b_args);
+
+ // Substitute just the unsizing params from B into A. The type after
+ // this substitution must be equal to B. This is so we don't unsize
+ // unrelated type parameters.
+ let new_a_args = tcx.mk_args_from_iter(
+ a_args
+ .iter()
+ .enumerate()
+ .map(|(i, a)| if unsizing_params.contains(i as u32) { b_args[i] } else { a }),
+ );
+ let unsized_a_ty = Ty::new_adt(tcx, a_def, new_a_args);
+
+ nested.extend(
+ infcx
+ .at(&ObligationCause::dummy(), goal.param_env)
+ .eq(DefineOpaqueTypes::No, unsized_a_ty, b_ty)
+ .expect("expected rematch to succeed")
+ .into_obligations(),
+ );
+
+ // Finally, we require that `TailA: Unsize<TailB>` for the tail field
+ // types.
+ nested.push(Obligation::new(
+ tcx,
+ ObligationCause::dummy(),
+ goal.param_env,
+ ty::TraitRef::new(tcx, goal.predicate.def_id(), [a_tail_ty, b_tail_ty]),
+ ));
+
+ Ok(Some(ImplSource::Builtin(source, nested)))
+ }
+ // Tuple unsizing `(.., T)` -> `(.., U)` where `T: Unsize<U>`
+ (&ty::Tuple(a_tys), &ty::Tuple(b_tys))
+ if a_tys.len() == b_tys.len() && !a_tys.is_empty() =>
+ {
+ let (a_last_ty, a_rest_tys) = a_tys.split_last().unwrap();
+ let b_last_ty = b_tys.last().unwrap();
+
+ // Substitute just the tail field of B into A, and require that the result equals B.
+ let unsized_a_ty =
+ Ty::new_tup_from_iter(tcx, a_rest_tys.iter().chain([b_last_ty]).copied());
+ nested.extend(
+ infcx
+ .at(&ObligationCause::dummy(), goal.param_env)
+ .eq(DefineOpaqueTypes::No, unsized_a_ty, b_ty)
+ .expect("expected rematch to succeed")
+ .into_obligations(),
+ );
+
+ // Similar to ADTs, require that we can unsize the tail.
+ nested.push(Obligation::new(
+ tcx,
+ ObligationCause::dummy(),
+ goal.param_env,
+ ty::TraitRef::new(tcx, goal.predicate.def_id(), [*a_last_ty, *b_last_ty]),
+ ));
+
+ // We need to be able to detect tuple unsizing to require its feature gate.
+ assert_eq!(
+ source,
+ BuiltinImplSource::TupleUnsizing,
+ "compiler-errors wants to know if this can ever be triggered..."
+ );
+ Ok(Some(ImplSource::Builtin(source, nested)))
+ }
+ _ => {
+ assert_ne!(certainty, Certainty::Yes);
+ Ok(None)
}
}
+}
- let target_trait_ref = target_trait_ref.unwrap();
-
- let mut offset = 0;
- let Some((vtable_base, vtable_vptr_slot)) =
- prepare_vtable_segments(infcx.tcx, source_trait_ref, |segment| {
- match segment {
- VtblSegment::MetadataDSA => {
- offset += TyCtxt::COMMON_VTABLE_ENTRIES.len();
- }
- VtblSegment::TraitOwnEntries { trait_ref, emit_vptr } => {
- let own_vtable_entries = count_own_vtable_entries(infcx.tcx, trait_ref);
-
- if trait_ref == target_trait_ref {
- if emit_vptr {
- return ControlFlow::Break((
- offset,
- Some(offset + count_own_vtable_entries(infcx.tcx, trait_ref)),
- ));
- } else {
- return ControlFlow::Break((offset, None));
- }
- }
-
- offset += own_vtable_entries;
- if emit_vptr {
- offset += 1;
- }
- }
- }
- ControlFlow::Continue(())
- })
- else {
- bug!();
- };
-
- // If we're upcasting, get the offset of the vtable pointer, otherwise get
- // the base of the vtable.
- Ok(Some(if is_upcasting {
- ImplSource::TraitUpcasting(ImplSourceTraitUpcastingData { vtable_vptr_slot, nested })
+fn structurally_normalize<'tcx>(
+ ty: Ty<'tcx>,
+ infcx: &InferCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ nested: &mut Vec<PredicateObligation<'tcx>>,
+) -> Ty<'tcx> {
+ if matches!(ty.kind(), ty::Alias(..)) {
+ let mut engine = <dyn TraitEngine<'tcx>>::new(infcx);
+ let normalized_ty = infcx
+ .at(&ObligationCause::dummy(), param_env)
+ .structurally_normalize(ty, &mut *engine)
+ .expect("normalization shouldn't fail if we got to here");
+ nested.extend(engine.pending_obligations());
+ normalized_ty
} else {
- ImplSource::Object(ImplSourceObjectData { vtable_base, nested })
- }))
+ ty
+ }
}
diff --git a/compiler/rustc_trait_selection/src/solve/fulfill.rs b/compiler/rustc_trait_selection/src/solve/fulfill.rs
index 88ee14c4d..f1d309122 100644
--- a/compiler/rustc_trait_selection/src/solve/fulfill.rs
+++ b/compiler/rustc_trait_selection/src/solve/fulfill.rs
@@ -57,7 +57,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentCtxt<'tcx> {
.map(|obligation| {
let code = infcx.probe(|_| {
match infcx
- .evaluate_root_goal(obligation.clone().into(), GenerateProofTree::No)
+ .evaluate_root_goal(obligation.clone().into(), GenerateProofTree::IfEnabled)
.0
{
Ok((_, Certainty::Maybe(MaybeCause::Ambiguity), _)) => {
@@ -96,7 +96,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentCtxt<'tcx> {
for obligation in mem::take(&mut self.obligations) {
let goal = obligation.clone().into();
let (changed, certainty, nested_goals) =
- match infcx.evaluate_root_goal(goal, GenerateProofTree::No).0 {
+ match infcx.evaluate_root_goal(goal, GenerateProofTree::IfEnabled).0 {
Ok(result) => result,
Err(NoSolution) => {
errors.push(FulfillmentError {
diff --git a/compiler/rustc_trait_selection/src/solve/inherent_projection.rs b/compiler/rustc_trait_selection/src/solve/inherent_projection.rs
new file mode 100644
index 000000000..28fe59b7f
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/solve/inherent_projection.rs
@@ -0,0 +1,50 @@
+//! Computes a normalizes-to (projection) goal for inherent associated types,
+//! `#![feature(inherent_associated_types)]`. Since astconv already determines
+//! which impl the IAT is being projected from, we just:
+//! 1. instantiate substs,
+//! 2. equate the self type, and
+//! 3. instantiate and register where clauses.
+use rustc_middle::traits::solve::{Certainty, Goal, QueryResult};
+use rustc_middle::ty;
+
+use super::EvalCtxt;
+
+impl<'tcx> EvalCtxt<'_, 'tcx> {
+ pub(super) fn normalize_inherent_associated_type(
+ &mut self,
+ goal: Goal<'tcx, ty::ProjectionPredicate<'tcx>>,
+ ) -> QueryResult<'tcx> {
+ let tcx = self.tcx();
+ let inherent = goal.predicate.projection_ty;
+ let expected = goal.predicate.term.ty().expect("inherent consts are treated separately");
+
+ let impl_def_id = tcx.parent(inherent.def_id);
+ let impl_substs = self.fresh_args_for_item(impl_def_id);
+
+ // Equate impl header and add impl where clauses
+ self.eq(
+ goal.param_env,
+ inherent.self_ty(),
+ tcx.type_of(impl_def_id).instantiate(tcx, impl_substs),
+ )?;
+
+ // Equate the IAT with the RHS of the projection goal
+ let inherent_substs = inherent.rebase_inherent_args_onto_impl(impl_substs, tcx);
+ self.eq(
+ goal.param_env,
+ expected,
+ tcx.type_of(inherent.def_id).instantiate(tcx, inherent_substs),
+ )
+ .expect("expected goal term to be fully unconstrained");
+
+ // Check the where-clauses on both the impl and the IAT
+ self.add_goals(
+ tcx.predicates_of(inherent.def_id)
+ .instantiate(tcx, inherent_substs)
+ .into_iter()
+ .map(|(pred, _)| goal.with(tcx, pred)),
+ );
+
+ self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/solve/inspect.rs b/compiler/rustc_trait_selection/src/solve/inspect.rs
index 2d6717fda..cda683963 100644
--- a/compiler/rustc_trait_selection/src/solve/inspect.rs
+++ b/compiler/rustc_trait_selection/src/solve/inspect.rs
@@ -200,30 +200,23 @@ impl<'tcx> ProofTreeBuilder<'tcx> {
tcx: TyCtxt<'tcx>,
generate_proof_tree: GenerateProofTree,
) -> ProofTreeBuilder<'tcx> {
- let generate_proof_tree = match (
- tcx.sess.opts.unstable_opts.dump_solver_proof_tree,
- tcx.sess.opts.unstable_opts.dump_solver_proof_tree_use_cache,
- generate_proof_tree,
- ) {
- (_, Some(use_cache), GenerateProofTree::Yes(_)) => {
- GenerateProofTree::Yes(UseGlobalCache::from_bool(use_cache))
- }
-
- (DumpSolverProofTree::Always, use_cache, GenerateProofTree::No) => {
- let use_cache = use_cache.unwrap_or(true);
- GenerateProofTree::Yes(UseGlobalCache::from_bool(use_cache))
- }
-
- (_, None, GenerateProofTree::Yes(_)) => generate_proof_tree,
- (DumpSolverProofTree::Never, _, _) => generate_proof_tree,
- (DumpSolverProofTree::OnError, _, _) => generate_proof_tree,
- };
-
match generate_proof_tree {
- GenerateProofTree::No => ProofTreeBuilder::new_noop(),
- GenerateProofTree::Yes(global_cache_disabled) => {
- ProofTreeBuilder::new_root(global_cache_disabled)
+ GenerateProofTree::Never => ProofTreeBuilder::new_noop(),
+ GenerateProofTree::IfEnabled => {
+ let opts = &tcx.sess.opts.unstable_opts;
+ match opts.dump_solver_proof_tree {
+ DumpSolverProofTree::Always => {
+ let use_cache = opts.dump_solver_proof_tree_use_cache.unwrap_or(true);
+ ProofTreeBuilder::new_root(UseGlobalCache::from_bool(use_cache))
+ }
+ // `OnError` is handled by reevaluating goals in error
+ // reporting with `GenerateProofTree::Yes`.
+ DumpSolverProofTree::OnError | DumpSolverProofTree::Never => {
+ ProofTreeBuilder::new_noop()
+ }
+ }
}
+ GenerateProofTree::Yes(use_cache) => ProofTreeBuilder::new_root(use_cache),
}
}
diff --git a/compiler/rustc_trait_selection/src/solve/mod.rs b/compiler/rustc_trait_selection/src/solve/mod.rs
index 77809d8d2..75a99f799 100644
--- a/compiler/rustc_trait_selection/src/solve/mod.rs
+++ b/compiler/rustc_trait_selection/src/solve/mod.rs
@@ -1,21 +1,27 @@
-//! The new trait solver, currently still WIP.
+//! The next-generation trait solver, currently still WIP.
//!
-//! As a user of the trait system, you can use `TyCtxt::evaluate_goal` to
-//! interact with this solver.
+//! As a user of Rust, you can use `-Ztrait-solver=next` or `next-coherence`
+//! to enable the new trait solver always, or just within coherence, respectively.
+//!
+//! As a developer of rustc, you shouldn't be using the new trait
+//! solver without asking the trait-system-refactor-initiative, but it can
+//! be enabled with `InferCtxtBuilder::with_next_trait_solver`. This will
+//! ensure that trait solving using that inference context will be routed
+//! to the new trait solver.
//!
//! For a high-level overview of how this solver works, check out the relevant
//! section of the rustc-dev-guide.
//!
//! FIXME(@lcnr): Write that section. If you read this before then ask me
//! about it on zulip.
-
use rustc_hir::def_id::DefId;
use rustc_infer::infer::canonical::{Canonical, CanonicalVarValues};
use rustc_infer::traits::query::NoSolution;
+use rustc_middle::infer::canonical::CanonicalVarInfos;
use rustc_middle::traits::solve::{
CanonicalResponse, Certainty, ExternalConstraintsData, Goal, QueryResult, Response,
};
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, Ty, TyCtxt, UniverseIndex};
use rustc_middle::ty::{
CoercePredicate, RegionOutlivesPredicate, SubtypePredicate, TypeOutlivesPredicate,
};
@@ -25,6 +31,7 @@ mod assembly;
mod canonicalize;
mod eval_ctxt;
mod fulfill;
+mod inherent_projection;
pub mod inspect;
mod normalize;
mod opaques;
@@ -37,7 +44,7 @@ pub use eval_ctxt::{
EvalCtxt, GenerateProofTree, InferCtxtEvalExt, InferCtxtSelectExt, UseGlobalCache,
};
pub use fulfill::FulfillmentCtxt;
-pub(crate) use normalize::deeply_normalize;
+pub(crate) use normalize::{deeply_normalize, deeply_normalize_with_skipped_universes};
#[derive(Debug, Clone, Copy)]
enum SolverMode {
@@ -123,10 +130,10 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
#[instrument(level = "debug", skip(self))]
fn compute_closure_kind_goal(
&mut self,
- goal: Goal<'tcx, (DefId, ty::SubstsRef<'tcx>, ty::ClosureKind)>,
+ goal: Goal<'tcx, (DefId, ty::GenericArgsRef<'tcx>, ty::ClosureKind)>,
) -> QueryResult<'tcx> {
- let (_, substs, expected_kind) = goal.predicate;
- let found_kind = substs.as_closure().kind_ty().to_opt_closure_kind();
+ let (_, args, expected_kind) = goal.predicate;
+ let found_kind = args.as_closure().kind_ty().to_opt_closure_kind();
let Some(found_kind) = found_kind else {
return self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
@@ -266,33 +273,64 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
return Err(NoSolution);
}
- let Certainty::Maybe(maybe_cause) = responses.iter().fold(
- Certainty::AMBIGUOUS,
- |certainty, response| {
+ let Certainty::Maybe(maybe_cause) =
+ responses.iter().fold(Certainty::AMBIGUOUS, |certainty, response| {
certainty.unify_with(response.value.certainty)
- },
- ) else {
+ })
+ else {
bug!("expected flounder response to be ambiguous")
};
Ok(self.make_ambiguous_response_no_constraints(maybe_cause))
}
+
+ /// Normalize a type when it is structurally matched on.
+ ///
+ /// For self types this is generally already handled through
+ /// `assemble_candidates_after_normalizing_self_ty`, so anything happening
+ /// in [`EvalCtxt::assemble_candidates_via_self_ty`] does not have to normalize
+ /// the self type. It is required when structurally matching on any other
+ /// arguments of a trait goal, e.g. when assembling builtin unsize candidates.
+ fn try_normalize_ty(
+ &mut self,
+ param_env: ty::ParamEnv<'tcx>,
+ mut ty: Ty<'tcx>,
+ ) -> Result<Option<Ty<'tcx>>, NoSolution> {
+ for _ in 0..self.local_overflow_limit() {
+ let ty::Alias(_, projection_ty) = *ty.kind() else {
+ return Ok(Some(ty));
+ };
+
+ let normalized_ty = self.next_ty_infer();
+ let normalizes_to_goal = Goal::new(
+ self.tcx(),
+ param_env,
+ ty::ProjectionPredicate { projection_ty, term: normalized_ty.into() },
+ );
+ self.add_goal(normalizes_to_goal);
+ self.try_evaluate_added_goals()?;
+ ty = self.resolve_vars_if_possible(normalized_ty);
+ }
+
+ Ok(None)
+ }
}
-pub(super) fn response_no_constraints<'tcx>(
+fn response_no_constraints_raw<'tcx>(
tcx: TyCtxt<'tcx>,
- goal: Canonical<'tcx, impl Sized>,
+ max_universe: UniverseIndex,
+ variables: CanonicalVarInfos<'tcx>,
certainty: Certainty,
-) -> QueryResult<'tcx> {
- Ok(Canonical {
- max_universe: goal.max_universe,
- variables: goal.variables,
+) -> CanonicalResponse<'tcx> {
+ Canonical {
+ max_universe,
+ variables,
value: Response {
- var_values: CanonicalVarValues::make_identity(tcx, goal.variables),
+ var_values: CanonicalVarValues::make_identity(tcx, variables),
// FIXME: maybe we should store the "no response" version in tcx, like
// we do for tcx.types and stuff.
external_constraints: tcx.mk_external_constraints(ExternalConstraintsData::default()),
certainty,
},
- })
+ }
}
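The `try_normalize_ty` loop above may need more than one iteration when aliases nest. A plain-Rust illustration with made-up names:

    trait Step {
        type Next;
    }

    struct A;
    struct B;

    impl Step for A {
        type Next = B;
    }

    impl Step for B {
        type Next = u32;
    }

    // Structurally matching on `<<A as Step>::Next as Step>::Next` takes two
    // rounds of normalization (`A::Next` -> `B`, then `B::Next` -> `u32`) before
    // a rigid type is reached; each round is one iteration of the loop above.
    fn demo(x: <<A as Step>::Next as Step>::Next) -> u32 {
        x
    }

    fn main() {
        assert_eq!(demo(5), 5);
    }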
diff --git a/compiler/rustc_trait_selection/src/solve/normalize.rs b/compiler/rustc_trait_selection/src/solve/normalize.rs
index c388850d8..872f0c879 100644
--- a/compiler/rustc_trait_selection/src/solve/normalize.rs
+++ b/compiler/rustc_trait_selection/src/solve/normalize.rs
@@ -20,8 +20,23 @@ pub(crate) fn deeply_normalize<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>(
at: At<'_, 'tcx>,
value: T,
) -> Result<T, Vec<FulfillmentError<'tcx>>> {
+ assert!(!value.has_escaping_bound_vars());
+ deeply_normalize_with_skipped_universes(at, value, vec![])
+}
+
+/// Deeply normalize all aliases in `value`. This does not handle inference and expects
+/// its input to be already fully resolved.
+///
+/// Additionally takes a list of universes which represents the binders which have been
+/// entered before passing `value` to the function. This is currently needed for
+/// `normalize_erasing_regions`, which skips binders as it walks through a type.
+pub(crate) fn deeply_normalize_with_skipped_universes<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>(
+ at: At<'_, 'tcx>,
+ value: T,
+ universes: Vec<Option<UniverseIndex>>,
+) -> Result<T, Vec<FulfillmentError<'tcx>>> {
let fulfill_cx = FulfillmentCtxt::new(at.infcx);
- let mut folder = NormalizationFolder { at, fulfill_cx, depth: 0, universes: Vec::new() };
+ let mut folder = NormalizationFolder { at, fulfill_cx, depth: 0, universes };
value.try_fold_with(&mut folder)
}
@@ -60,10 +75,7 @@ impl<'tcx> NormalizationFolder<'_, 'tcx> {
tcx,
self.at.cause.clone(),
self.at.param_env,
- ty::Binder::dummy(ty::ProjectionPredicate {
- projection_ty: alias,
- term: new_infer_ty.into(),
- }),
+ ty::ProjectionPredicate { projection_ty: alias, term: new_infer_ty.into() },
);
// Do not emit an error if normalization is known to fail but instead
@@ -116,10 +128,10 @@ impl<'tcx> NormalizationFolder<'_, 'tcx> {
tcx,
self.at.cause.clone(),
self.at.param_env,
- ty::Binder::dummy(ty::ProjectionPredicate {
- projection_ty: tcx.mk_alias_ty(uv.def, uv.substs),
+ ty::ProjectionPredicate {
+ projection_ty: tcx.mk_alias_ty(uv.def, uv.args),
term: new_infer_ct.into(),
- }),
+ },
);
let result = if infcx.predicate_may_hold(&obligation) {
@@ -180,7 +192,7 @@ impl<'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for NormalizationFolder<'_, 'tcx> {
mapped_regions,
mapped_types,
mapped_consts,
- &mut self.universes,
+ &self.universes,
result,
))
} else {
@@ -210,7 +222,7 @@ impl<'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for NormalizationFolder<'_, 'tcx> {
mapped_regions,
mapped_types,
mapped_consts,
- &mut self.universes,
+ &self.universes,
result,
))
} else {
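The `universes` parameter exists because callers such as `normalize_erasing_regions` walk underneath binders. In the small example below (illustrative code, not rustc internals), the alias sits under a `for<'a>` binder, so a normalizer that reaches it has already entered one binder and needs one skipped-universe entry:

    trait Project {
        type Out;
    }

    impl Project for u8 {
        type Out = u16;
    }

    // The alias `<u8 as Project>::Out` is nested under the `for<'a>` binder of
    // the function-pointer type: exactly the situation the skipped-universes
    // list describes, with one `Option<UniverseIndex>` entry per entered binder.
    type UnderBinder = for<'a> fn(&'a <u8 as Project>::Out) -> &'a u16;

    fn call(f: UnderBinder, x: &u16) -> &u16 {
        f(x)
    }

    fn identity(x: &u16) -> &u16 {
        x
    }

    fn main() {
        let f: UnderBinder = identity;
        assert_eq!(*call(f, &3), 3);
    }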
diff --git a/compiler/rustc_trait_selection/src/solve/opaques.rs b/compiler/rustc_trait_selection/src/solve/opaques.rs
index 16194f5ad..f08adc020 100644
--- a/compiler/rustc_trait_selection/src/solve/opaques.rs
+++ b/compiler/rustc_trait_selection/src/solve/opaques.rs
@@ -1,3 +1,6 @@
+//! Computes a normalizes-to (projection) goal for opaque types. This goal
+//! behaves differently depending on the param-env's reveal mode and whether
+//! the opaque is in a defining scope.
use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::solve::{Certainty, Goal, QueryResult};
use rustc_middle::traits::Reveal;
@@ -26,8 +29,8 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
if !self.can_define_opaque_ty(opaque_ty_def_id) {
return Err(NoSolution);
}
- // FIXME: This may have issues when the substs contain aliases...
- match self.tcx().uses_unique_placeholders_ignoring_regions(opaque_ty.substs) {
+ // FIXME: This may have issues when the args contain aliases...
+ match self.tcx().uses_unique_placeholders_ignoring_regions(opaque_ty.args) {
Err(NotUniqueParam::NotParam(param)) if param.is_non_region_infer() => {
return self.evaluate_added_goals_and_make_canonical_response(
Certainty::AMBIGUOUS,
@@ -40,7 +43,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
}
// Prefer opaques registered already.
let opaque_type_key =
- ty::OpaqueTypeKey { def_id: opaque_ty_def_id, substs: opaque_ty.substs };
+ ty::OpaqueTypeKey { def_id: opaque_ty_def_id, args: opaque_ty.args };
let matches =
self.unify_existing_opaque_tys(goal.param_env, opaque_type_key, expected);
if !matches.is_empty() {
@@ -54,7 +57,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
self.insert_hidden_type(opaque_type_key, goal.param_env, expected)?;
self.add_item_bounds_for_hidden_type(
opaque_ty.def_id,
- opaque_ty.substs,
+ opaque_ty.args,
goal.param_env,
expected,
);
@@ -65,7 +68,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
// e.g. assigning `impl Copy := NotCopy`
self.add_item_bounds_for_hidden_type(
opaque_ty.def_id,
- opaque_ty.substs,
+ opaque_ty.args,
goal.param_env,
expected,
);
@@ -73,7 +76,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
}
(Reveal::All, _) => {
// FIXME: Add an assertion that opaque type storage is empty.
- let actual = tcx.type_of(opaque_ty.def_id).subst(tcx, opaque_ty.substs);
+ let actual = tcx.type_of(opaque_ty.def_id).instantiate(tcx, opaque_ty.args);
self.eq(goal.param_env, expected, actual)?;
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}
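The split above between the defining-scope cases and `Reveal::All` corresponds to ordinary opaque-type code like this sketch:

    // Inside the defining scope the solver may register the hidden type
    // `impl Iterator<Item = u32> := std::ops::Range<u32>`; outside of it
    // (under `Reveal::UserFacing`) the opaque stays abstract and only its
    // item bounds, here `Iterator<Item = u32>`, are usable.
    fn make() -> impl Iterator<Item = u32> {
        0..10
    }

    fn main() {
        let sum: u32 = make().sum();
        assert_eq!(sum, 45);
    }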
diff --git a/compiler/rustc_trait_selection/src/solve/project_goals.rs b/compiler/rustc_trait_selection/src/solve/project_goals.rs
index e53b784a7..e47e22877 100644
--- a/compiler/rustc_trait_selection/src/solve/project_goals.rs
+++ b/compiler/rustc_trait_selection/src/solve/project_goals.rs
@@ -1,8 +1,7 @@
-use crate::traits::specialization_graph;
+use crate::traits::{check_args_compatible, specialization_graph};
use super::assembly::{self, structural_traits};
use super::EvalCtxt;
-use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_hir::LangItem;
@@ -11,11 +10,12 @@ use rustc_infer::traits::specialization_graph::LeafDef;
use rustc_infer::traits::Reveal;
use rustc_middle::traits::solve::inspect::CandidateKind;
use rustc_middle::traits::solve::{CanonicalResponse, Certainty, Goal, QueryResult};
+use rustc_middle::traits::BuiltinImplSource;
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
use rustc_middle::ty::ProjectionPredicate;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{ToPredicate, TypeVisitableExt};
-use rustc_span::{sym, DUMMY_SP};
+use rustc_span::{sym, ErrorGuaranteed, DUMMY_SP};
impl<'tcx> EvalCtxt<'_, 'tcx> {
#[instrument(level = "debug", skip(self), ret)]
@@ -48,7 +48,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
self.merge_candidates(candidates)
}
ty::AssocItemContainer::ImplContainer => {
- bug!("IATs not supported here yet")
+ self.normalize_inherent_associated_type(goal)
}
}
} else {
@@ -58,7 +58,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
}
DefKind::AnonConst => self.normalize_anon_const(goal),
DefKind::OpaqueTy => self.normalize_opaque_type(goal),
- DefKind::TyAlias => self.normalize_weak_type(goal),
+ DefKind::TyAlias { .. } => self.normalize_weak_type(goal),
kind => bug!("unknown DefKind {} in projection goal: {goal:#?}", kind.descr(def_id)),
}
}
@@ -72,7 +72,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
goal.param_env,
ty::UnevaluatedConst::new(
goal.predicate.projection_ty.def_id,
- goal.predicate.projection_ty.substs,
+ goal.predicate.projection_ty.args,
),
self.tcx()
.type_of(goal.predicate.projection_ty.def_id)
@@ -112,6 +112,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
) -> QueryResult<'tcx> {
if let Some(projection_pred) = assumption.as_projection_clause() {
if projection_pred.projection_def_id() == goal.predicate.def_id() {
+ let tcx = ecx.tcx();
ecx.probe_candidate("assumption").enter(|ecx| {
let assumption_projection_pred =
ecx.instantiate_binder_with_infer(projection_pred);
@@ -122,6 +123,14 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
)?;
ecx.eq(goal.param_env, goal.predicate.term, assumption_projection_pred.term)
.expect("expected goal term to be fully unconstrained");
+
+ // Add GAT where clauses from the trait's definition
+ ecx.add_goals(
+ tcx.predicates_of(goal.predicate.def_id())
+ .instantiate_own(tcx, goal.predicate.projection_ty.args)
+ .map(|(pred, _)| goal.with(tcx, pred)),
+ );
+
then(ecx)
})
} else {
@@ -142,93 +151,116 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
let goal_trait_ref = goal.predicate.projection_ty.trait_ref(tcx);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
let drcx = DeepRejectCtxt { treat_obligation_params: TreatParams::ForLookup };
- if !drcx.substs_refs_may_unify(goal_trait_ref.substs, impl_trait_ref.skip_binder().substs) {
+ if !drcx.args_refs_may_unify(goal_trait_ref.args, impl_trait_ref.skip_binder().args) {
return Err(NoSolution);
}
- ecx.probe(
- |r| CandidateKind::Candidate { name: "impl".into(), result: *r }).enter(
- |ecx| {
- let impl_substs = ecx.fresh_substs_for_item(impl_def_id);
- let impl_trait_ref = impl_trait_ref.subst(tcx, impl_substs);
-
- ecx.eq(goal.param_env, goal_trait_ref, impl_trait_ref)?;
-
- let where_clause_bounds = tcx
- .predicates_of(impl_def_id)
- .instantiate(tcx, impl_substs)
- .predicates
- .into_iter()
- .map(|pred| goal.with(tcx, pred));
- ecx.add_goals(where_clause_bounds);
-
- // In case the associated item is hidden due to specialization, we have to
- // return ambiguity this would otherwise be incomplete, resulting in
- // unsoundness during coherence (#105782).
- let Some(assoc_def) = fetch_eligible_assoc_item_def(
- ecx,
- goal.param_env,
- goal_trait_ref,
- goal.predicate.def_id(),
- impl_def_id
- )? else {
- return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
+ ecx.probe(|r| CandidateKind::Candidate { name: "impl".into(), result: *r }).enter(|ecx| {
+ let impl_args = ecx.fresh_args_for_item(impl_def_id);
+ let impl_trait_ref = impl_trait_ref.instantiate(tcx, impl_args);
+
+ ecx.eq(goal.param_env, goal_trait_ref, impl_trait_ref)?;
+
+ let where_clause_bounds = tcx
+ .predicates_of(impl_def_id)
+ .instantiate(tcx, impl_args)
+ .predicates
+ .into_iter()
+ .map(|pred| goal.with(tcx, pred));
+ ecx.add_goals(where_clause_bounds);
+
+ // Add GAT where clauses from the trait's definition
+ ecx.add_goals(
+ tcx.predicates_of(goal.predicate.def_id())
+ .instantiate_own(tcx, goal.predicate.projection_ty.args)
+ .map(|(pred, _)| goal.with(tcx, pred)),
+ );
+
+ // In case the associated item is hidden due to specialization, we have to
+ // return ambiguity, as this would otherwise be incomplete, resulting in
+ // unsoundness during coherence (#105782).
+ let Some(assoc_def) = fetch_eligible_assoc_item_def(
+ ecx,
+ goal.param_env,
+ goal_trait_ref,
+ goal.predicate.def_id(),
+ impl_def_id,
+ )?
+ else {
+ return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
+ };
+
+ let error_response = |ecx: &mut EvalCtxt<'_, 'tcx>, reason| {
+ let guar = tcx.sess.delay_span_bug(tcx.def_span(assoc_def.item.def_id), reason);
+ let error_term = match assoc_def.item.kind {
+ ty::AssocKind::Const => ty::Const::new_error(
+ tcx,
+ guar,
+ tcx.type_of(goal.predicate.def_id())
+ .instantiate(tcx, goal.predicate.projection_ty.args),
+ )
+ .into(),
+ ty::AssocKind::Type => Ty::new_error(tcx, guar).into(),
+ ty::AssocKind::Fn => unreachable!(),
};
+ ecx.eq(goal.param_env, goal.predicate.term, error_term)
+ .expect("expected goal term to be fully unconstrained");
+ ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
+ };
- if !assoc_def.item.defaultness(tcx).has_value() {
- let guar = tcx.sess.delay_span_bug(
- tcx.def_span(assoc_def.item.def_id),
- "missing value for assoc item in impl",
- );
- let error_term = match assoc_def.item.kind {
- ty::AssocKind::Const => ty::Const::new_error(tcx,
- guar,
- tcx.type_of(goal.predicate.def_id())
- .subst(tcx, goal.predicate.projection_ty.substs),
- )
- .into(),
- ty::AssocKind::Type => Ty::new_error(tcx,guar).into(),
- ty::AssocKind::Fn => unreachable!(),
- };
- ecx.eq(goal.param_env, goal.predicate.term, error_term)
- .expect("expected goal term to be fully unconstrained");
- return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
- }
+ if !assoc_def.item.defaultness(tcx).has_value() {
+ return error_response(ecx, "missing value for assoc item in impl");
+ }
- // Getting the right substitutions here is complex, e.g. given:
- // - a goal `<Vec<u32> as Trait<i32>>::Assoc<u64>`
- // - the applicable impl `impl<T> Trait<i32> for Vec<T>`
- // - and the impl which defines `Assoc` being `impl<T, U> Trait<U> for Vec<T>`
- //
- // We first rebase the goal substs onto the impl, going from `[Vec<u32>, i32, u64]`
- // to `[u32, u64]`.
- //
- // And then map these substs to the substs of the defining impl of `Assoc`, going
- // from `[u32, u64]` to `[u32, i32, u64]`.
- let impl_substs_with_gat = goal.predicate.projection_ty.substs.rebase_onto(
- tcx,
- goal_trait_ref.def_id,
- impl_substs,
- );
- let substs = ecx.translate_substs(
- goal.param_env,
- impl_def_id,
- impl_substs_with_gat,
- assoc_def.defining_node,
+ // Getting the right args here is complex, e.g. given:
+ // - a goal `<Vec<u32> as Trait<i32>>::Assoc<u64>`
+ // - the applicable impl `impl<T> Trait<i32> for Vec<T>`
+ // - and the impl which defines `Assoc` being `impl<T, U> Trait<U> for Vec<T>`
+ //
+ // We first rebase the goal args onto the impl, going from `[Vec<u32>, i32, u64]`
+ // to `[u32, u64]`.
+ //
+ // And then map these args to the args of the defining impl of `Assoc`, going
+ // from `[u32, u64]` to `[u32, i32, u64]`.
+ let impl_args_with_gat = goal.predicate.projection_ty.args.rebase_onto(
+ tcx,
+ goal_trait_ref.def_id,
+ impl_args,
+ );
+ let args = ecx.translate_args(
+ goal.param_env,
+ impl_def_id,
+ impl_args_with_gat,
+ assoc_def.defining_node,
+ );
+
+ if !check_args_compatible(tcx, assoc_def.item, args) {
+ return error_response(
+ ecx,
+ "associated item has mismatched generic item arguments",
);
+ }
- // Finally we construct the actual value of the associated type.
- let term = match assoc_def.item.kind {
- ty::AssocKind::Type => tcx.type_of(assoc_def.item.def_id).map_bound(|ty| ty.into()),
- ty::AssocKind::Const => bug!("associated const projection is not supported yet"),
- ty::AssocKind::Fn => unreachable!("we should never project to a fn"),
- };
+ // Finally we construct the actual value of the associated type.
+ let term = match assoc_def.item.kind {
+ ty::AssocKind::Type => tcx.type_of(assoc_def.item.def_id).map_bound(|ty| ty.into()),
+ ty::AssocKind::Const => bug!("associated const projection is not supported yet"),
+ ty::AssocKind::Fn => unreachable!("we should never project to a fn"),
+ };
- ecx.eq(goal.param_env, goal.predicate.term, term.subst(tcx, substs))
- .expect("expected goal term to be fully unconstrained");
- ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
- },
- )
+ ecx.eq(goal.param_env, goal.predicate.term, term.instantiate(tcx, args))
+ .expect("expected goal term to be fully unconstrained");
+ ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
+ })
+ }
+
+ /// Fail to normalize if the predicate contains an error. Alternatively, we could normalize to `ty::Error`
+ /// and succeed; we can experiment with this to figure out what results in better error messages.
+ fn consider_error_guaranteed_candidate(
+ _ecx: &mut EvalCtxt<'_, 'tcx>,
+ _guar: ErrorGuaranteed,
+ ) -> QueryResult<'tcx> {
+ Err(NoSolution)
}
fn consider_auto_trait_candidate(
@@ -350,7 +382,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
ty::Dynamic(_, _, _) => {
let dyn_metadata = tcx.require_lang_item(LangItem::DynMetadata, None);
tcx.type_of(dyn_metadata)
- .subst(tcx, &[ty::GenericArg::from(goal.predicate.self_ty())])
+ .instantiate(tcx, &[ty::GenericArg::from(goal.predicate.self_ty())])
}
ty::Alias(_, _) | ty::Param(_) | ty::Placeholder(..) => {
@@ -365,29 +397,21 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
tcx.types.unit
}
- ty::Adt(def, substs) if def.is_struct() => {
- match def.non_enum_variant().tail_opt() {
- None => tcx.types.unit,
- Some(field_def) => {
- let self_ty = field_def.ty(tcx, substs);
- ecx.add_goal(goal.with(
- tcx,
- ty::Binder::dummy(goal.predicate.with_self_ty(tcx, self_ty)),
- ));
- return ecx
- .evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
- }
+ ty::Adt(def, args) if def.is_struct() => match def.non_enum_variant().tail_opt() {
+ None => tcx.types.unit,
+ Some(field_def) => {
+ let self_ty = field_def.ty(tcx, args);
+ ecx.add_goal(goal.with(tcx, goal.predicate.with_self_ty(tcx, self_ty)));
+ return ecx
+ .evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
}
- }
+ },
ty::Adt(_, _) => tcx.types.unit,
ty::Tuple(elements) => match elements.last() {
None => tcx.types.unit,
Some(&self_ty) => {
- ecx.add_goal(goal.with(
- tcx,
- ty::Binder::dummy(goal.predicate.with_self_ty(tcx, self_ty)),
- ));
+ ecx.add_goal(goal.with(tcx, goal.predicate.with_self_ty(tcx, self_ty)));
return ecx
.evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
}
@@ -413,7 +437,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx> {
let self_ty = goal.predicate.self_ty();
- let ty::Generator(def_id, substs, _) = *self_ty.kind() else {
+ let ty::Generator(def_id, args, _) = *self_ty.kind() else {
return Err(NoSolution);
};
@@ -423,15 +447,15 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
return Err(NoSolution);
}
- let term = substs.as_generator().return_ty().into();
+ let term = args.as_generator().return_ty().into();
Self::consider_implied_clause(
ecx,
goal,
- ty::Binder::dummy(ty::ProjectionPredicate {
+ ty::ProjectionPredicate {
projection_ty: ecx.tcx().mk_alias_ty(goal.predicate.def_id(), [self_ty]),
term,
- })
+ }
.to_predicate(tcx),
// Technically, we need to check that the future type is Sized,
// but that's already proven by the generator being WF.
@@ -444,7 +468,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx> {
let self_ty = goal.predicate.self_ty();
- let ty::Generator(def_id, substs, _) = *self_ty.kind() else {
+ let ty::Generator(def_id, args, _) = *self_ty.kind() else {
return Err(NoSolution);
};
@@ -454,7 +478,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
return Err(NoSolution);
}
- let generator = substs.as_generator();
+ let generator = args.as_generator();
let name = tcx.associated_item(goal.predicate.def_id()).name;
let term = if name == sym::Return {
@@ -468,12 +492,12 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
Self::consider_implied_clause(
ecx,
goal,
- ty::Binder::dummy(ty::ProjectionPredicate {
+ ty::ProjectionPredicate {
projection_ty: ecx
.tcx()
.mk_alias_ty(goal.predicate.def_id(), [self_ty, generator.resume_ty()]),
term,
- })
+ }
.to_predicate(tcx),
// Technically, we need to check that the future type is Sized,
// but that's already proven by the generator being WF.
@@ -481,17 +505,17 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
)
}
- fn consider_builtin_unsize_candidate(
+ fn consider_unsize_to_dyn_candidate(
_ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx> {
- bug!("`Unsize` does not have an associated type: {:?}", goal);
+ bug!("`Unsize` does not have an associated type: {:?}", goal)
}
- fn consider_builtin_dyn_upcast_candidates(
+ fn consider_structural_builtin_unsize_candidates(
_ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
- ) -> Vec<CanonicalResponse<'tcx>> {
+ ) -> Vec<(CanonicalResponse<'tcx>, BuiltinImplSource)> {
bug!("`Unsize` does not have an associated type: {:?}", goal);
}
diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs b/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs
index 56f126e91..be48447e2 100644
--- a/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs
+++ b/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs
@@ -19,21 +19,25 @@ rustc_index::newtype_index! {
#[derive(Debug, Clone)]
pub(super) struct ProvisionalEntry<'tcx> {
- // In case we have a coinductive cycle, this is the
- // the currently least restrictive result of this goal.
- pub(super) response: QueryResult<'tcx>,
- // In case of a cycle, the position of deepest stack entry involved
- // in that cycle. This is monotonically decreasing in the stack as all
- // elements between the current stack element in the deepest stack entry
- // involved have to also be involved in that cycle.
- //
- // We can only move entries to the global cache once we're complete done
- // with the cycle. If this entry has not been involved in a cycle,
- // this is just its own depth.
+ /// In case we have a coinductive cycle, this is the
+ /// current provisional result of this goal.
+ ///
+ /// This starts out as `None` for all goals and gets set to `Some`
+ /// when the goal gets popped from the stack or when we rerun its
+ /// evaluation to reach a fixpoint.
+ pub(super) response: Option<QueryResult<'tcx>>,
+ /// In case of a cycle, the position of the deepest stack entry involved
+ /// in that cycle. This is monotonically decreasing in the stack as all
+ /// elements between the current stack element and the deepest stack entry
+ /// involved also have to be involved in that cycle.
+ ///
+ /// We can only move entries to the global cache once we're completely done
+ /// with the cycle. If this entry has not been involved in a cycle,
+ /// this is just its own depth.
pub(super) depth: StackDepth,
- // The goal for this entry. Should always be equal to the corresponding goal
- // in the lookup table.
+ /// The goal for this entry. Should always be equal to the corresponding goal
+ /// in the lookup table.
pub(super) input: CanonicalInput<'tcx>,
}
@@ -92,7 +96,7 @@ impl<'tcx> ProvisionalCache<'tcx> {
self.entries[entry_index].depth
}
- pub(super) fn provisional_result(&self, entry_index: EntryIndex) -> QueryResult<'tcx> {
+ pub(super) fn provisional_result(&self, entry_index: EntryIndex) -> Option<QueryResult<'tcx>> {
self.entries[entry_index].response
}
}
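The switch to an `Option`al provisional result drives a fixpoint loop of roughly the shape below; this is a toy, standalone model of the reruns, not the actual search-graph code:

    // Rerun a cycle root until its response stops changing, starting from no
    // provisional result, and give up after `limit` iterations.
    fn fixpoint(limit: usize, mut prove: impl FnMut(Option<u32>) -> u32) -> Option<u32> {
        let mut provisional: Option<u32> = None;
        for _ in 0..limit {
            let response = prove(provisional);
            if provisional == Some(response) {
                return Some(response); // reached a fixpoint
            }
            provisional = Some(response); // rerun with the updated provisional result
        }
        None // treated as cycle overflow
    }

    fn main() {
        // A contrived "goal" whose result stabilizes at 3.
        let result = fixpoint(8, |prev| match prev {
            None => 1,
            Some(n) if n < 3 => n + 1,
            Some(n) => n,
        });
        assert_eq!(result, Some(3));
    }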
diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
index f00456e26..49ebfa4e6 100644
--- a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
+++ b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
@@ -1,37 +1,51 @@
mod cache;
-mod overflow;
-
-pub(super) use overflow::OverflowHandler;
-use rustc_middle::traits::solve::inspect::CacheHit;
use self::cache::ProvisionalEntry;
+use super::inspect::ProofTreeBuilder;
+use super::SolverMode;
use cache::ProvisionalCache;
-use overflow::OverflowData;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_index::Idx;
use rustc_index::IndexVec;
use rustc_middle::dep_graph::DepKind;
-use rustc_middle::traits::solve::{CanonicalInput, Certainty, MaybeCause, QueryResult};
+use rustc_middle::traits::solve::inspect::CacheHit;
+use rustc_middle::traits::solve::CacheData;
+use rustc_middle::traits::solve::{CanonicalInput, Certainty, EvaluationCache, QueryResult};
use rustc_middle::ty::TyCtxt;
-use std::{collections::hash_map::Entry, mem};
-
-use super::inspect::ProofTreeBuilder;
-use super::SolverMode;
+use rustc_session::Limit;
+use std::collections::hash_map::Entry;
rustc_index::newtype_index! {
pub struct StackDepth {}
}
-struct StackElem<'tcx> {
+#[derive(Debug)]
+struct StackEntry<'tcx> {
input: CanonicalInput<'tcx>,
+ available_depth: Limit,
+ // The maximum depth reached by this stack entry, only up to date
+ // for the top of the stack and lazily updated for the rest.
+ reached_depth: StackDepth,
+ encountered_overflow: bool,
has_been_used: bool,
+
+ /// We put only the root goal of a coinductive cycle into the global cache.
+ ///
+ /// If we were to use that result when later trying to prove another cycle
+ /// participant, we could end up with unstable query results.
+ ///
+ /// See tests/ui/new-solver/coinduction/incompleteness-unstable-result.rs for
+ /// an example of where this is needed.
+ cycle_participants: FxHashSet<CanonicalInput<'tcx>>,
}
pub(super) struct SearchGraph<'tcx> {
mode: SolverMode,
+ local_overflow_limit: usize,
/// The stack of goals currently being computed.
///
/// An element is *deeper* in the stack if its index is *lower*.
- stack: IndexVec<StackDepth, StackElem<'tcx>>,
- overflow_data: OverflowData,
+ stack: IndexVec<StackDepth, StackEntry<'tcx>>,
provisional_cache: ProvisionalCache<'tcx>,
}
@@ -39,8 +53,8 @@ impl<'tcx> SearchGraph<'tcx> {
pub(super) fn new(tcx: TyCtxt<'tcx>, mode: SolverMode) -> SearchGraph<'tcx> {
Self {
mode,
+ local_overflow_limit: tcx.recursion_limit().0.ilog2() as usize,
stack: Default::default(),
- overflow_data: OverflowData::new(tcx),
provisional_cache: ProvisionalCache::empty(),
}
}
@@ -49,19 +63,42 @@ impl<'tcx> SearchGraph<'tcx> {
self.mode
}
- /// We do not use the global cache during coherence.
+ pub(super) fn local_overflow_limit(&self) -> usize {
+ self.local_overflow_limit
+ }
+
+ /// Update the stack and reached depths on cache hits.
+ #[instrument(level = "debug", skip(self))]
+ fn on_cache_hit(&mut self, additional_depth: usize, encountered_overflow: bool) {
+ let reached_depth = self.stack.next_index().plus(additional_depth);
+ if let Some(last) = self.stack.raw.last_mut() {
+ last.reached_depth = last.reached_depth.max(reached_depth);
+ last.encountered_overflow |= encountered_overflow;
+ }
+ }
+
+ /// Pops the highest goal from the stack, lazily updating the
+ /// next goal in the stack.
///
+ /// Directly popping from the stack instead of using this method
+ /// would cause us to not track overflow and recursion depth correctly.
+ fn pop_stack(&mut self) -> StackEntry<'tcx> {
+ let elem = self.stack.pop().unwrap();
+ if let Some(last) = self.stack.raw.last_mut() {
+ last.reached_depth = last.reached_depth.max(elem.reached_depth);
+ last.encountered_overflow |= elem.encountered_overflow;
+ }
+ elem
+ }
+
/// The trait solver behavior is different for coherence
- /// so we would have to add the solver mode to the cache key.
- /// This is probably not worth it as trait solving during
- /// coherence tends to already be incredibly fast.
- ///
- /// We could add another global cache for coherence instead,
- /// but that's effort so let's only do it if necessary.
- pub(super) fn should_use_global_cache(&self) -> bool {
+ /// so we use a separate cache. Alternatively we could use
+ /// a single cache and share it between coherence and ordinary
+ /// trait solving.
+ pub(super) fn global_cache(&self, tcx: TyCtxt<'tcx>) -> &'tcx EvaluationCache<'tcx> {
match self.mode {
- SolverMode::Normal => true,
- SolverMode::Coherence => false,
+ SolverMode::Normal => &tcx.new_solver_evaluation_cache,
+ SolverMode::Coherence => &tcx.new_solver_coherence_evaluation_cache,
}
}
@@ -87,36 +124,111 @@ impl<'tcx> SearchGraph<'tcx> {
}
}
- /// Tries putting the new goal on the stack, returning an error if it is already cached.
+ /// Fetches whether the current goal encountered overflow.
+ ///
+ /// This should only be used for the check in `evaluate_goal`.
+ pub(super) fn encountered_overflow(&self) -> bool {
+ if let Some(last) = self.stack.raw.last() { last.encountered_overflow } else { false }
+ }
+
+ /// Resets `encountered_overflow` of the current goal.
+ ///
+ /// This should only be used for the check in `evaluate_goal`.
+ pub(super) fn reset_encountered_overflow(&mut self, encountered_overflow: bool) -> bool {
+ if let Some(last) = self.stack.raw.last_mut() {
+ let prev = last.encountered_overflow;
+ last.encountered_overflow = encountered_overflow;
+ prev
+ } else {
+ false
+ }
+ }
+
+ /// Returns the remaining depth allowed for nested goals.
+ ///
+ /// This is generally simply one less than the current depth.
+ /// However, if we encountered overflow, we significantly reduce
+ /// the remaining depth of all nested goals to prevent hangs
+ /// in case there is exponential blowup.
+ fn allowed_depth_for_nested(
+ tcx: TyCtxt<'tcx>,
+ stack: &IndexVec<StackDepth, StackEntry<'tcx>>,
+ ) -> Option<Limit> {
+ if let Some(last) = stack.raw.last() {
+ if last.available_depth.0 == 0 {
+ return None;
+ }
+
+ Some(if last.encountered_overflow {
+ Limit(last.available_depth.0 / 4)
+ } else {
+ Limit(last.available_depth.0 - 1)
+ })
+ } else {
+ Some(tcx.recursion_limit())
+ }
+ }
+
+ /// Probably the most involved method of the whole solver.
///
- /// This correctly updates the provisional cache if there is a cycle.
- #[instrument(level = "debug", skip(self, tcx, inspect), ret)]
- fn try_push_stack(
+ /// Given some goal which is proven via the `prove_goal` closure, this
+ /// handles caching, overflow, and coinductive cycles.
+ pub(super) fn with_new_goal(
&mut self,
tcx: TyCtxt<'tcx>,
input: CanonicalInput<'tcx>,
inspect: &mut ProofTreeBuilder<'tcx>,
- ) -> Result<(), QueryResult<'tcx>> {
- // Look at the provisional cache to check for cycles.
+ mut prove_goal: impl FnMut(&mut Self, &mut ProofTreeBuilder<'tcx>) -> QueryResult<'tcx>,
+ ) -> QueryResult<'tcx> {
+ // Check for overflow.
+ let Some(available_depth) = Self::allowed_depth_for_nested(tcx, &self.stack) else {
+ if let Some(last) = self.stack.raw.last_mut() {
+ last.encountered_overflow = true;
+ }
+ return Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
+ };
+
+ // Try to fetch the goal from the global cache.
+ if inspect.use_global_cache() {
+ if let Some(CacheData { result, reached_depth, encountered_overflow }) =
+ self.global_cache(tcx).get(
+ tcx,
+ input,
+ |cycle_participants| {
+ self.stack.iter().any(|entry| cycle_participants.contains(&entry.input))
+ },
+ available_depth,
+ )
+ {
+ self.on_cache_hit(reached_depth, encountered_overflow);
+ return result;
+ }
+ }
+
+ // Look at the provisional cache to detect cycles.
let cache = &mut self.provisional_cache;
match cache.lookup_table.entry(input) {
- // No entry, simply push this goal on the stack after dealing with overflow.
+ // No entry, we push this goal on the stack and try to prove it.
Entry::Vacant(v) => {
- if self.overflow_data.has_overflow(self.stack.len()) {
- return Err(self.deal_with_overflow(tcx, input));
- }
-
- let depth = self.stack.push(StackElem { input, has_been_used: false });
- let response = super::response_no_constraints(tcx, input, Certainty::Yes);
- let entry_index = cache.entries.push(ProvisionalEntry { response, depth, input });
+ let depth = self.stack.next_index();
+ let entry = StackEntry {
+ input,
+ available_depth,
+ reached_depth: depth,
+ encountered_overflow: false,
+ has_been_used: false,
+ cycle_participants: Default::default(),
+ };
+ assert_eq!(self.stack.push(entry), depth);
+ let entry_index =
+ cache.entries.push(ProvisionalEntry { response: None, depth, input });
v.insert(entry_index);
- Ok(())
}
// We have a nested goal which relies on a goal `root` deeper in the stack.
//
- // We first store that we may have to rerun `evaluate_goal` for `root` in case the
- // provisional response is not equal to the final response. We also update the depth
- // of all goals which recursively depend on our current goal to depend on `root`
+ // We first store that we may have to reprove `root` in case the provisional
+ // response is not equal to the final response. We also update the depth of all
+ // goals which recursively depend on our current goal to depend on `root`
// instead.
//
// Finally we can return either the provisional response for that goal if we have a
@@ -125,169 +237,144 @@ impl<'tcx> SearchGraph<'tcx> {
inspect.cache_hit(CacheHit::Provisional);
let entry_index = *entry_index.get();
-
let stack_depth = cache.depth(entry_index);
debug!("encountered cycle with depth {stack_depth:?}");
cache.add_dependency_of_leaf_on(entry_index);
+ let mut iter = self.stack.iter_mut();
+ let root = iter.nth(stack_depth.as_usize()).unwrap();
+ for e in iter {
+ root.cycle_participants.insert(e.input);
+ }
+ // If we're in a cycle, we have to retry proving the current goal
+ // until we reach a fixpoint.
self.stack[stack_depth].has_been_used = true;
- // NOTE: The goals on the stack aren't the only goals involved in this cycle.
- // We can also depend on goals which aren't part of the stack but coinductively
- // depend on the stack themselves. We already checked whether all the goals
- // between these goals and their root on the stack. This means that as long as
- // each goal in a cycle is checked for coinductivity by itself, simply checking
- // the stack is enough.
- if self.stack.raw[stack_depth.index()..]
- .iter()
- .all(|g| g.input.value.goal.predicate.is_coinductive(tcx))
- {
- Err(cache.provisional_result(entry_index))
+ return if let Some(result) = cache.provisional_result(entry_index) {
+ result
} else {
- Err(super::response_no_constraints(
- tcx,
- input,
- Certainty::Maybe(MaybeCause::Overflow),
- ))
- }
- }
- }
- }
-
- /// We cannot simply store the result of [super::EvalCtxt::compute_goal] as we have to deal with
- /// coinductive cycles.
- ///
- /// When we encounter a coinductive cycle, we have to prove the final result of that cycle
- /// while we are still computing that result. Because of this we continuously recompute the
- /// cycle until the result of the previous iteration is equal to the final result, at which
- /// point we are done.
- ///
- /// This function returns `true` if we were able to finalize the goal and `false` if it has
- /// updated the provisional cache and we have to recompute the current goal.
- ///
- /// FIXME: Refer to the rustc-dev-guide entry once it exists.
- #[instrument(level = "debug", skip(self, actual_input), ret)]
- fn try_finalize_goal(
- &mut self,
- actual_input: CanonicalInput<'tcx>,
- response: QueryResult<'tcx>,
- ) -> bool {
- let stack_elem = self.stack.pop().unwrap();
- let StackElem { input, has_been_used } = stack_elem;
- assert_eq!(input, actual_input);
-
- let cache = &mut self.provisional_cache;
- let provisional_entry_index = *cache.lookup_table.get(&input).unwrap();
- let provisional_entry = &mut cache.entries[provisional_entry_index];
- // We eagerly update the response in the cache here. If we have to reevaluate
- // this goal we use the new response when hitting a cycle, and we definitely
- // want to access the final response whenever we look at the cache.
- let prev_response = mem::replace(&mut provisional_entry.response, response);
-
- // Was the current goal the root of a cycle and was the provisional response
- // different from the final one.
- if has_been_used && prev_response != response {
- // If so, remove all entries whose result depends on this goal
- // from the provisional cache...
- //
- // That's not completely correct, as a nested goal can also
- // depend on a goal which is lower in the stack so it doesn't
- // actually depend on the current goal. This should be fairly
- // rare and is hopefully not relevant for performance.
- #[allow(rustc::potential_query_instability)]
- cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
- cache.entries.truncate(provisional_entry_index.index() + 1);
-
- // ...and finally push our goal back on the stack and reevaluate it.
- self.stack.push(StackElem { input, has_been_used: false });
- false
- } else {
- true
- }
- }
+ // If we don't have a provisional result yet, the goal still has to
+ // be on the stack.
+ let mut goal_on_stack = false;
+ let mut is_coinductive = true;
+ for entry in self.stack.raw[stack_depth.index()..]
+ .iter()
+ .skip_while(|entry| entry.input != input)
+ {
+ goal_on_stack = true;
+ is_coinductive &= entry.input.value.goal.predicate.is_coinductive(tcx);
+ }
+ debug_assert!(goal_on_stack);
- pub(super) fn with_new_goal(
- &mut self,
- tcx: TyCtxt<'tcx>,
- canonical_input: CanonicalInput<'tcx>,
- inspect: &mut ProofTreeBuilder<'tcx>,
- mut loop_body: impl FnMut(&mut Self, &mut ProofTreeBuilder<'tcx>) -> QueryResult<'tcx>,
- ) -> QueryResult<'tcx> {
- if self.should_use_global_cache() && inspect.use_global_cache() {
- if let Some(result) = tcx.new_solver_evaluation_cache.get(&canonical_input, tcx) {
- debug!(?canonical_input, ?result, "cache hit");
- inspect.cache_hit(CacheHit::Global);
- return result;
+ if is_coinductive {
+ Self::response_no_constraints(tcx, input, Certainty::Yes)
+ } else {
+ Self::response_no_constraints(tcx, input, Certainty::OVERFLOW)
+ }
+ };
}
}
- match self.try_push_stack(tcx, canonical_input, inspect) {
- Ok(()) => {}
- // Our goal is already on the stack, eager return.
- Err(response) => return response,
- }
-
// This is for global caching, so we properly track query dependencies.
- // Everything that affects the `Result` should be performed within this
+ // Everything that affects the `result` should be performed within this
// `with_anon_task` closure.
- let (result, dep_node) = tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, || {
- self.repeat_while_none(
- |this| {
- let result = this.deal_with_overflow(tcx, canonical_input);
- let _ = this.stack.pop().unwrap();
- result
- },
- |this| {
- let result = loop_body(this, inspect);
- this.try_finalize_goal(canonical_input, result).then(|| result)
- },
- )
- });
+ let ((final_entry, result), dep_node) =
+ tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, || {
+ // When we encounter a coinductive cycle, we have to fetch the
+ // result of that cycle while we are still computing it. Because
+ // of this we continuously recompute the cycle until the result
+ // of the previous iteration is equal to the final result, at which
+ // point we are done.
+ for _ in 0..self.local_overflow_limit() {
+ let response = prove_goal(self, inspect);
+ // Check whether the current goal is the root of a cycle and whether
+ // we have to rerun because its provisional result differed from the
+ // final result.
+ //
+ // Also update the response for this goal stored in the provisional
+ // cache.
+ let stack_entry = self.pop_stack();
+ debug_assert_eq!(stack_entry.input, input);
+ let cache = &mut self.provisional_cache;
+ let provisional_entry_index =
+ *cache.lookup_table.get(&stack_entry.input).unwrap();
+ let provisional_entry = &mut cache.entries[provisional_entry_index];
+ if stack_entry.has_been_used
+ && provisional_entry.response.map_or(true, |r| r != response)
+ {
+ // If so, update the provisional result for this goal and remove
+ // all entries whose result depends on this goal from the provisional
+ // cache...
+ //
+ // That's not completely correct, as a nested goal can also only
+ // depend on a goal which is lower in the stack so it doesn't
+ // actually depend on the current goal. This should be fairly
+ // rare and is hopefully not relevant for performance.
+ provisional_entry.response = Some(response);
+ #[allow(rustc::potential_query_instability)]
+ cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
+ cache.entries.truncate(provisional_entry_index.index() + 1);
+
+ // ...and finally push our goal back on the stack and reevaluate it.
+ self.stack.push(StackEntry { has_been_used: false, ..stack_entry });
+ } else {
+ return (stack_entry, response);
+ }
+ }
+
+ debug!("canonical cycle overflow");
+ let current_entry = self.pop_stack();
+ let result = Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
+ (current_entry, result)
+ });
+
+ // We're now done with this goal. In case this goal is involved in a larger cycle
+ // we do not remove it from the provisional cache but instead update its provisional result.
+ // We only add the root of cycles to the global cache.
+ //
+ // It is not possible for any nested goal to depend on something deeper on the
+ // stack, as this would have also updated the depth of the current goal.
let cache = &mut self.provisional_cache;
- let provisional_entry_index = *cache.lookup_table.get(&canonical_input).unwrap();
+ let provisional_entry_index = *cache.lookup_table.get(&input).unwrap();
let provisional_entry = &mut cache.entries[provisional_entry_index];
let depth = provisional_entry.depth;
-
- // If not, we're done with this goal.
- //
- // Check whether that this goal doesn't depend on a goal deeper on the stack
- // and if so, move it to the global cache.
- //
- // Note that if any nested goal were to depend on something deeper on the stack,
- // this would have also updated the depth of the current goal.
if depth == self.stack.next_index() {
- // If the current goal is the head of a cycle, we drop all other
- // cycle participants without moving them to the global cache.
- let other_cycle_participants = provisional_entry_index.index() + 1;
- for (i, entry) in cache.entries.drain_enumerated(other_cycle_participants..) {
+ for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..) {
let actual_index = cache.lookup_table.remove(&entry.input);
debug_assert_eq!(Some(i), actual_index);
debug_assert!(entry.depth == depth);
}
- let current_goal = cache.entries.pop().unwrap();
- let actual_index = cache.lookup_table.remove(&current_goal.input);
- debug_assert_eq!(Some(provisional_entry_index), actual_index);
- debug_assert!(current_goal.depth == depth);
-
- // We move the root goal to the global cache if we either did not hit an overflow or if it's
- // the root goal as that will now always hit the same overflow limit.
- //
- // NOTE: We cannot move any non-root goals to the global cache. When replaying the root goal's
- // dependencies, our non-root goal may no longer appear as child of the root goal.
+ // When encountering a cycle, both inductive and coinductive, we only
+ // move the root into the global cache. We also store all other cycle
+ // participants involved.
//
- // See https://github.com/rust-lang/rust/pull/108071 for some additional context.
- let can_cache = !self.overflow_data.did_overflow() || self.stack.is_empty();
- if self.should_use_global_cache() && can_cache {
- tcx.new_solver_evaluation_cache.insert(
- current_goal.input,
- dep_node,
- current_goal.response,
- );
- }
+ // We disable the global cache entry of the root goal if a cycle
+ // participant is on the stack. This is necessary to prevent unstable
+ // results. See the comment of `StackEntry::cycle_participants` for
+ // more details.
+ let reached_depth = final_entry.reached_depth.as_usize() - self.stack.len();
+ self.global_cache(tcx).insert(
+ input,
+ reached_depth,
+ final_entry.encountered_overflow,
+ final_entry.cycle_participants,
+ dep_node,
+ result,
+ )
+ } else {
+ provisional_entry.response = Some(result);
}
result
}
+
+ fn response_no_constraints(
+ tcx: TyCtxt<'tcx>,
+ goal: CanonicalInput<'tcx>,
+ certainty: Certainty,
+ ) -> QueryResult<'tcx> {
+ Ok(super::response_no_constraints_raw(tcx, goal.max_universe, goal.variables, certainty))
+ }
}
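Concretely, with the default `recursion_limit` of 128, the depth bookkeeping above works out as in this standalone arithmetic sketch:

    fn main() {
        let recursion_limit: usize = 128;

        // `local_overflow_limit`: how many times a cycle root is re-evaluated
        // before the solver gives up with `Certainty::OVERFLOW`.
        let local_overflow_limit = recursion_limit.ilog2() as usize;
        assert_eq!(local_overflow_limit, 7);

        // `allowed_depth_for_nested`: nested goals normally get one less unit
        // of depth, but once overflow has been encountered the budget drops to
        // a quarter to avoid hangs under exponential blowup.
        let available_depth: usize = 64;
        assert_eq!(available_depth - 1, 63); // no overflow encountered yet
        assert_eq!(available_depth / 4, 16); // after encountering overflow
    }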
diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/overflow.rs b/compiler/rustc_trait_selection/src/solve/search_graph/overflow.rs
deleted file mode 100644
index e0a2e0c5c..000000000
--- a/compiler/rustc_trait_selection/src/solve/search_graph/overflow.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-use rustc_infer::infer::canonical::Canonical;
-use rustc_infer::traits::query::NoSolution;
-use rustc_middle::traits::solve::{Certainty, MaybeCause, QueryResult};
-use rustc_middle::ty::TyCtxt;
-use rustc_session::Limit;
-
-use super::SearchGraph;
-use crate::solve::{response_no_constraints, EvalCtxt};
-
-/// When detecting a solver overflow, we return ambiguity. Overflow can be
-/// *hidden* by either a fatal error in an **AND** or a trivial success in an **OR**.
-///
-/// This is in issue in case of exponential blowup, e.g. if each goal on the stack
-/// has multiple nested (overflowing) candidates. To deal with this, we reduce the limit
-/// used by the solver when hitting the default limit for the first time.
-///
-/// FIXME: Get tests where always using the `default_limit` results in a hang and refer
-/// to them here. We can also improve the overflow strategy if necessary.
-pub(super) struct OverflowData {
- default_limit: Limit,
- current_limit: Limit,
- /// When proving an **AND** we have to repeatedly iterate over the yet unproven goals.
- ///
- /// Because of this each iteration also increases the depth in addition to the stack
- /// depth.
- additional_depth: usize,
-}
-
-impl OverflowData {
- pub(super) fn new(tcx: TyCtxt<'_>) -> OverflowData {
- let default_limit = tcx.recursion_limit();
- OverflowData { default_limit, current_limit: default_limit, additional_depth: 0 }
- }
-
- #[inline]
- pub(super) fn did_overflow(&self) -> bool {
- self.default_limit.0 != self.current_limit.0
- }
-
- #[inline]
- pub(super) fn has_overflow(&self, depth: usize) -> bool {
- !self.current_limit.value_within_limit(depth + self.additional_depth)
- }
-
- /// Updating the current limit when hitting overflow.
- fn deal_with_overflow(&mut self) {
- // When first hitting overflow we reduce the overflow limit
- // for all future goals to prevent hangs if there's an exponential
- // blowup.
- self.current_limit.0 = self.default_limit.0 / 8;
- }
-}
-
-pub(in crate::solve) trait OverflowHandler<'tcx> {
- fn search_graph(&mut self) -> &mut SearchGraph<'tcx>;
-
- fn repeat_while_none<T>(
- &mut self,
- on_overflow: impl FnOnce(&mut Self) -> Result<T, NoSolution>,
- mut loop_body: impl FnMut(&mut Self) -> Option<Result<T, NoSolution>>,
- ) -> Result<T, NoSolution> {
- let start_depth = self.search_graph().overflow_data.additional_depth;
- let depth = self.search_graph().stack.len();
- while !self.search_graph().overflow_data.has_overflow(depth) {
- if let Some(result) = loop_body(self) {
- self.search_graph().overflow_data.additional_depth = start_depth;
- return result;
- }
-
- self.search_graph().overflow_data.additional_depth += 1;
- }
- self.search_graph().overflow_data.additional_depth = start_depth;
- self.search_graph().overflow_data.deal_with_overflow();
- on_overflow(self)
- }
-
- // Increment the `additional_depth` by one and evaluate `body`, or `on_overflow`
- // if the depth is overflown.
- fn with_incremented_depth<T>(
- &mut self,
- on_overflow: impl FnOnce(&mut Self) -> T,
- body: impl FnOnce(&mut Self) -> T,
- ) -> T {
- let depth = self.search_graph().stack.len();
- self.search_graph().overflow_data.additional_depth += 1;
-
- let result = if self.search_graph().overflow_data.has_overflow(depth) {
- self.search_graph().overflow_data.deal_with_overflow();
- on_overflow(self)
- } else {
- body(self)
- };
-
- self.search_graph().overflow_data.additional_depth -= 1;
- result
- }
-}
-
-impl<'tcx> OverflowHandler<'tcx> for EvalCtxt<'_, 'tcx> {
- fn search_graph(&mut self) -> &mut SearchGraph<'tcx> {
- &mut self.search_graph
- }
-}
-
-impl<'tcx> OverflowHandler<'tcx> for SearchGraph<'tcx> {
- fn search_graph(&mut self) -> &mut SearchGraph<'tcx> {
- self
- }
-}
-
-impl<'tcx> SearchGraph<'tcx> {
- pub fn deal_with_overflow(
- &mut self,
- tcx: TyCtxt<'tcx>,
- goal: Canonical<'tcx, impl Sized>,
- ) -> QueryResult<'tcx> {
- self.overflow_data.deal_with_overflow();
- response_no_constraints(tcx, goal, Certainty::Maybe(MaybeCause::Overflow))
- }
-}
diff --git a/compiler/rustc_trait_selection/src/solve/trait_goals.rs b/compiler/rustc_trait_selection/src/solve/trait_goals.rs
index ef5f25b1f..8685f3100 100644
--- a/compiler/rustc_trait_selection/src/solve/trait_goals.rs
+++ b/compiler/rustc_trait_selection/src/solve/trait_goals.rs
@@ -5,13 +5,13 @@ use super::{EvalCtxt, SolverMode};
use rustc_hir::def_id::DefId;
use rustc_hir::{LangItem, Movability};
use rustc_infer::traits::query::NoSolution;
-use rustc_infer::traits::util::supertraits;
+use rustc_middle::traits::solve::inspect::CandidateKind;
use rustc_middle::traits::solve::{CanonicalResponse, Certainty, Goal, QueryResult};
-use rustc_middle::traits::Reveal;
+use rustc_middle::traits::{BuiltinImplSource, Reveal};
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams, TreatProjections};
use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt};
use rustc_middle::ty::{TraitPredicate, TypeVisitableExt};
-use rustc_span::DUMMY_SP;
+use rustc_span::{ErrorGuaranteed, DUMMY_SP};
impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
fn self_ty(self) -> Ty<'tcx> {
@@ -39,10 +39,9 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
let drcx = DeepRejectCtxt { treat_obligation_params: TreatParams::ForLookup };
- if !drcx.substs_refs_may_unify(
- goal.predicate.trait_ref.substs,
- impl_trait_ref.skip_binder().substs,
- ) {
+ if !drcx
+ .args_refs_may_unify(goal.predicate.trait_ref.args, impl_trait_ref.skip_binder().args)
+ {
return Err(NoSolution);
}
@@ -63,13 +62,13 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
};
ecx.probe_candidate("impl").enter(|ecx| {
- let impl_substs = ecx.fresh_substs_for_item(impl_def_id);
- let impl_trait_ref = impl_trait_ref.subst(tcx, impl_substs);
+ let impl_args = ecx.fresh_args_for_item(impl_def_id);
+ let impl_trait_ref = impl_trait_ref.instantiate(tcx, impl_args);
ecx.eq(goal.param_env, goal.predicate.trait_ref, impl_trait_ref)?;
let where_clause_bounds = tcx
.predicates_of(impl_def_id)
- .instantiate(tcx, impl_substs)
+ .instantiate(tcx, impl_args)
.predicates
.into_iter()
.map(|pred| goal.with(tcx, pred));
@@ -79,6 +78,13 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
})
}
+ fn consider_error_guaranteed_candidate(
+ ecx: &mut EvalCtxt<'_, 'tcx>,
+ _guar: ErrorGuaranteed,
+ ) -> QueryResult<'tcx> {
+ ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
+ }
+
fn probe_and_match_goal_against_assumption(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
@@ -164,7 +170,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
ecx.probe_candidate("trait alias").enter(|ecx| {
let nested_obligations = tcx
.predicates_of(goal.predicate.def_id())
- .instantiate(tcx, goal.predicate.trait_ref.substs);
+ .instantiate(tcx, goal.predicate.trait_ref.args);
ecx.add_goals(nested_obligations.predicates.into_iter().map(|p| goal.with(tcx, p)));
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
})
@@ -337,7 +343,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
}
let self_ty = goal.predicate.self_ty();
- let ty::Generator(def_id, substs, _) = *self_ty.kind() else {
+ let ty::Generator(def_id, args, _) = *self_ty.kind() else {
return Err(NoSolution);
};
@@ -347,7 +353,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
return Err(NoSolution);
}
- let generator = substs.as_generator();
+ let generator = args.as_generator();
Self::consider_implied_clause(
ecx,
goal,
@@ -359,7 +365,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
)
}
- fn consider_builtin_unsize_candidate(
+ fn consider_builtin_discriminant_kind_candidate(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx> {
@@ -367,131 +373,205 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
return Err(NoSolution);
}
- let tcx = ecx.tcx();
- let a_ty = goal.predicate.self_ty();
- let b_ty = goal.predicate.trait_ref.substs.type_at(1);
- if b_ty.is_ty_var() {
- return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
+ // `DiscriminantKind` is automatically implemented for every type.
+ ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
+ }
+
+ fn consider_builtin_destruct_candidate(
+ ecx: &mut EvalCtxt<'_, 'tcx>,
+ goal: Goal<'tcx, Self>,
+ ) -> QueryResult<'tcx> {
+ if goal.predicate.polarity != ty::ImplPolarity::Positive {
+ return Err(NoSolution);
}
- ecx.probe_candidate("builtin unsize").enter(|ecx| {
- match (a_ty.kind(), b_ty.kind()) {
- // Trait upcasting, or `dyn Trait + Auto + 'a` -> `dyn Trait + 'b`
- (&ty::Dynamic(_, _, ty::Dyn), &ty::Dynamic(_, _, ty::Dyn)) => {
- // Dyn upcasting is handled separately, since due to upcasting,
- // when there are two supertraits that differ by substs, we
- // may return more than one query response.
- Err(NoSolution)
- }
- // `T` -> `dyn Trait` unsizing
- (_, &ty::Dynamic(data, region, ty::Dyn)) => {
- // Can only unsize to an object-safe type
- if data
- .principal_def_id()
- .is_some_and(|def_id| !tcx.check_is_object_safe(def_id))
- {
- return Err(NoSolution);
- }
- let Some(sized_def_id) = tcx.lang_items().sized_trait() else {
- return Err(NoSolution);
- };
- // Check that the type implements all of the predicates of the def-id.
- // (i.e. the principal, all of the associated types match, and any auto traits)
- ecx.add_goals(
- data.iter().map(|pred| goal.with(tcx, pred.with_self_ty(tcx, a_ty))),
- );
- // The type must be Sized to be unsized.
- ecx.add_goal(goal.with(tcx, ty::TraitRef::new(tcx, sized_def_id, [a_ty])));
- // The type must outlive the lifetime of the `dyn` we're unsizing into.
- ecx.add_goal(
- goal.with(tcx, ty::Binder::dummy(ty::OutlivesPredicate(a_ty, region))),
- );
- ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
- }
- // `[T; n]` -> `[T]` unsizing
- (&ty::Array(a_elem_ty, ..), &ty::Slice(b_elem_ty)) => {
- // We just require that the element type stays the same
- ecx.eq(goal.param_env, a_elem_ty, b_elem_ty)?;
- ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
- }
- // Struct unsizing `Struct<T>` -> `Struct<U>` where `T: Unsize<U>`
- (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs))
- if a_def.is_struct() && a_def.did() == b_def.did() =>
- {
- let unsizing_params = tcx.unsizing_params_for_adt(a_def.did());
- // We must be unsizing some type parameters. This also implies
- // that the struct has a tail field.
- if unsizing_params.is_empty() {
- return Err(NoSolution);
- }
+ // FIXME(-Ztrait-solver=next): Implement this when we get const working in the new solver
- let tail_field = a_def.non_enum_variant().tail();
- let tail_field_ty = tcx.type_of(tail_field.did);
-
- let a_tail_ty = tail_field_ty.subst(tcx, a_substs);
- let b_tail_ty = tail_field_ty.subst(tcx, b_substs);
-
- // Substitute just the unsizing params from B into A. The type after
- // this substitution must be equal to B. This is so we don't unsize
- // unrelated type parameters.
- let new_a_substs =
- tcx.mk_substs_from_iter(a_substs.iter().enumerate().map(|(i, a)| {
- if unsizing_params.contains(i as u32) { b_substs[i] } else { a }
- }));
- let unsized_a_ty = Ty::new_adt(tcx, a_def, new_a_substs);
-
- // Finally, we require that `TailA: Unsize<TailB>` for the tail field
- // types.
- ecx.eq(goal.param_env, unsized_a_ty, b_ty)?;
- ecx.add_goal(goal.with(
- tcx,
- ty::TraitRef::new(tcx, goal.predicate.def_id(), [a_tail_ty, b_tail_ty]),
- ));
- ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
- }
- // Tuple unsizing `(.., T)` -> `(.., U)` where `T: Unsize<U>`
- (&ty::Tuple(a_tys), &ty::Tuple(b_tys))
- if a_tys.len() == b_tys.len() && !a_tys.is_empty() =>
- {
- let (a_last_ty, a_rest_tys) = a_tys.split_last().unwrap();
- let b_last_ty = b_tys.last().unwrap();
-
- // Substitute just the tail field of B., and require that they're equal.
- let unsized_a_ty =
- Ty::new_tup_from_iter(tcx, a_rest_tys.iter().chain([b_last_ty]).copied());
- ecx.eq(goal.param_env, unsized_a_ty, b_ty)?;
-
- // Similar to ADTs, require that the rest of the fields are equal.
- ecx.add_goal(goal.with(
- tcx,
- ty::TraitRef::new(tcx, goal.predicate.def_id(), [*a_last_ty, *b_last_ty]),
- ));
- ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
- }
- _ => Err(NoSolution),
+ // `Destruct` is automatically implemented for every type in
+ // non-const environments.
+ ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
+ }
+
+ fn consider_builtin_transmute_candidate(
+ ecx: &mut EvalCtxt<'_, 'tcx>,
+ goal: Goal<'tcx, Self>,
+ ) -> QueryResult<'tcx> {
+ if goal.predicate.polarity != ty::ImplPolarity::Positive {
+ return Err(NoSolution);
+ }
+
+ // `rustc_transmute` does not have support for type or const params
+ if goal.has_non_region_placeholders() {
+ return Err(NoSolution);
+ }
+
+ // Erase regions because we compute layouts in `rustc_transmute`,
+ // which will ICE for region vars.
+ let args = ecx.tcx().erase_regions(goal.predicate.trait_ref.args);
+
+ let Some(assume) =
+ rustc_transmute::Assume::from_const(ecx.tcx(), goal.param_env, args.const_at(3))
+ else {
+ return Err(NoSolution);
+ };
+
+ let certainty = ecx.is_transmutable(
+ rustc_transmute::Types { dst: args.type_at(0), src: args.type_at(1) },
+ args.type_at(2),
+ assume,
+ )?;
+ ecx.evaluate_added_goals_and_make_canonical_response(certainty)
+ }
+
+ fn consider_unsize_to_dyn_candidate(
+ ecx: &mut EvalCtxt<'_, 'tcx>,
+ goal: Goal<'tcx, Self>,
+ ) -> QueryResult<'tcx> {
+ ecx.probe(|_| CandidateKind::UnsizeAssembly).enter(|ecx| {
+ let a_ty = goal.predicate.self_ty();
+ // We need to normalize the b_ty since it's destructured as a `dyn Trait`.
+ let Some(b_ty) =
+ ecx.try_normalize_ty(goal.param_env, goal.predicate.trait_ref.args.type_at(1))?
+ else {
+ return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::OVERFLOW);
+ };
+
+ let ty::Dynamic(b_data, b_region, ty::Dyn) = *b_ty.kind() else {
+ return Err(NoSolution);
+ };
+
+ let tcx = ecx.tcx();
+
+ // Can only unsize to an object-safe trait.
+ if b_data.principal_def_id().is_some_and(|def_id| !tcx.check_is_object_safe(def_id)) {
+ return Err(NoSolution);
+ }
+
+ // Check that the type implements all of the predicates of the trait object.
+ // (i.e. the principal, all of the associated types match, and any auto traits)
+ ecx.add_goals(b_data.iter().map(|pred| goal.with(tcx, pred.with_self_ty(tcx, a_ty))));
+
+ // The type must be `Sized` to be unsized.
+ if let Some(sized_def_id) = tcx.lang_items().sized_trait() {
+ ecx.add_goal(goal.with(tcx, ty::TraitRef::new(tcx, sized_def_id, [a_ty])));
+ } else {
+ return Err(NoSolution);
}
+
+ // The type must outlive the lifetime of the `dyn` we're unsizing into.
+ ecx.add_goal(goal.with(tcx, ty::OutlivesPredicate(a_ty, b_region)));
+ ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
})
}
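For illustration, the `T` -> `dyn Trait` unsizing handled by `consider_unsize_to_dyn_candidate` corresponds to ordinary coercions like the following minimal sketch (stable Rust; names are illustrative):

```rust
use std::fmt::Debug;

fn main() {
    let value = 5u32;
    // `u32: Debug`, `u32: Sized`, and `u32` outlives the `dyn` lifetime,
    // which mirrors the three goals registered by the candidate above.
    let object: &dyn Debug = &value;
    println!("{object:?}");
}
```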
- fn consider_builtin_dyn_upcast_candidates(
+ /// ```ignore (builtin impl example)
+ /// trait Trait {
+ /// fn foo(&self);
+ /// }
+ /// // results in the following builtin impl
+ /// impl<'a, T: Trait + 'a> Unsize<dyn Trait + 'a> for T {}
+ /// ```
+ fn consider_structural_builtin_unsize_candidates(
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
- ) -> Vec<CanonicalResponse<'tcx>> {
+ ) -> Vec<(CanonicalResponse<'tcx>, BuiltinImplSource)> {
if goal.predicate.polarity != ty::ImplPolarity::Positive {
return vec![];
}
- let tcx = ecx.tcx();
-
- let a_ty = goal.predicate.self_ty();
- let b_ty = goal.predicate.trait_ref.substs.type_at(1);
- let ty::Dynamic(a_data, a_region, ty::Dyn) = *a_ty.kind() else {
- return vec![];
+ let misc_candidate = |ecx: &mut EvalCtxt<'_, 'tcx>, certainty| {
+ (
+ ecx.evaluate_added_goals_and_make_canonical_response(certainty).unwrap(),
+ BuiltinImplSource::Misc,
+ )
};
- let ty::Dynamic(b_data, b_region, ty::Dyn) = *b_ty.kind() else {
- return vec![];
+
+ let result_to_single = |result, source| match result {
+ Ok(resp) => vec![(resp, source)],
+ Err(NoSolution) => vec![],
};
+ ecx.probe(|_| CandidateKind::UnsizeAssembly).enter(|ecx| {
+ let a_ty = goal.predicate.self_ty();
+ // We need to normalize the b_ty since it's matched structurally
+ // in the other functions below.
+ let b_ty = match ecx
+ .try_normalize_ty(goal.param_env, goal.predicate.trait_ref.args.type_at(1))
+ {
+ Ok(Some(b_ty)) => b_ty,
+ Ok(None) => return vec![misc_candidate(ecx, Certainty::OVERFLOW)],
+ Err(_) => return vec![],
+ };
+
+ let goal = goal.with(ecx.tcx(), (a_ty, b_ty));
+ match (a_ty.kind(), b_ty.kind()) {
+ (ty::Infer(ty::TyVar(..)), ..) => bug!("unexpected infer {a_ty:?} {b_ty:?}"),
+ (_, ty::Infer(ty::TyVar(..))) => vec![misc_candidate(ecx, Certainty::AMBIGUOUS)],
+
+ // Trait upcasting, or `dyn Trait + Auto + 'a` -> `dyn Trait + 'b`.
+ (
+ &ty::Dynamic(a_data, a_region, ty::Dyn),
+ &ty::Dynamic(b_data, b_region, ty::Dyn),
+ ) => ecx.consider_builtin_dyn_upcast_candidates(
+ goal, a_data, a_region, b_data, b_region,
+ ),
+
+ // `T` -> `dyn Trait` unsizing is handled separately in `consider_unsize_to_dyn_candidate`
+ (_, &ty::Dynamic(..)) => vec![],
+
+ // `[T; N]` -> `[T]` unsizing
+ (&ty::Array(a_elem_ty, ..), &ty::Slice(b_elem_ty)) => result_to_single(
+ ecx.consider_builtin_array_unsize(goal, a_elem_ty, b_elem_ty),
+ BuiltinImplSource::Misc,
+ ),
+
+ // `Struct<T>` -> `Struct<U>` where `T: Unsize<U>`
+ (&ty::Adt(a_def, a_args), &ty::Adt(b_def, b_args))
+ if a_def.is_struct() && a_def == b_def =>
+ {
+ result_to_single(
+ ecx.consider_builtin_struct_unsize(goal, a_def, a_args, b_args),
+ BuiltinImplSource::Misc,
+ )
+ }
+
+ // `(A, B, T)` -> `(A, B, U)` where `T: Unsize<U>`
+ (&ty::Tuple(a_tys), &ty::Tuple(b_tys))
+ if a_tys.len() == b_tys.len() && !a_tys.is_empty() =>
+ {
+ result_to_single(
+ ecx.consider_builtin_tuple_unsize(goal, a_tys, b_tys),
+ BuiltinImplSource::TupleUnsizing,
+ )
+ }
+
+ _ => vec![],
+ }
+ })
+ }
+}
+
+impl<'tcx> EvalCtxt<'_, 'tcx> {
+ /// Trait upcasting allows for coercions between trait objects:
+ /// ```ignore (builtin impl example)
+ /// trait Super {}
+ /// trait Trait: Super {}
+ /// // results in builtin impls upcasting to a super trait
+ /// impl<'a, 'b: 'a> Unsize<dyn Super + 'a> for dyn Trait + 'b {}
+ /// // and impls removing auto trait bounds.
+ /// impl<'a, 'b: 'a> Unsize<dyn Trait + 'a> for dyn Trait + Send + 'b {}
+ /// ```
+ fn consider_builtin_dyn_upcast_candidates(
+ &mut self,
+ goal: Goal<'tcx, (Ty<'tcx>, Ty<'tcx>)>,
+ a_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
+ a_region: ty::Region<'tcx>,
+ b_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
+ b_region: ty::Region<'tcx>,
+ ) -> Vec<(CanonicalResponse<'tcx>, BuiltinImplSource)> {
+ let tcx = self.tcx();
+ let Goal { predicate: (a_ty, _b_ty), .. } = goal;
+
// All of a's auto traits need to be in b's auto traits.
let auto_traits_compatible =
b_data.auto_traits().all(|b| a_data.auto_traits().any(|a| a == b));
@@ -499,125 +579,241 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
return vec![];
}
- let mut unsize_dyn_to_principal = |principal: Option<ty::PolyExistentialTraitRef<'tcx>>| {
- ecx.probe_candidate("upcast dyn to principle").enter(|ecx| -> Result<_, NoSolution> {
- // Require that all of the trait predicates from A match B, except for
- // the auto traits. We do this by constructing a new A type with B's
- // auto traits, and equating these types.
- let new_a_data = principal
- .into_iter()
- .map(|trait_ref| trait_ref.map_bound(ty::ExistentialPredicate::Trait))
- .chain(a_data.iter().filter(|a| {
- matches!(a.skip_binder(), ty::ExistentialPredicate::Projection(_))
- }))
- .chain(
- b_data
- .auto_traits()
- .map(ty::ExistentialPredicate::AutoTrait)
- .map(ty::Binder::dummy),
- );
- let new_a_data = tcx.mk_poly_existential_predicates_from_iter(new_a_data);
- let new_a_ty = Ty::new_dynamic(tcx, new_a_data, b_region, ty::Dyn);
-
- // We also require that A's lifetime outlives B's lifetime.
- ecx.eq(goal.param_env, new_a_ty, b_ty)?;
- ecx.add_goal(
- goal.with(tcx, ty::Binder::dummy(ty::OutlivesPredicate(a_region, b_region))),
- );
- ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
- })
- };
-
let mut responses = vec![];
// If the principal def ids match (or are both none), then we're not doing
// trait upcasting. We're just removing auto traits (or shortening the lifetime).
if a_data.principal_def_id() == b_data.principal_def_id() {
- if let Ok(response) = unsize_dyn_to_principal(a_data.principal()) {
- responses.push(response);
- }
- } else if let Some(a_principal) = a_data.principal()
- && let Some(b_principal) = b_data.principal()
- {
- for super_trait_ref in supertraits(tcx, a_principal.with_self_ty(tcx, a_ty)) {
- if super_trait_ref.def_id() != b_principal.def_id() {
- continue;
- }
- let erased_trait_ref = super_trait_ref
- .map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
- if let Ok(response) = unsize_dyn_to_principal(Some(erased_trait_ref)) {
- responses.push(response);
- }
+ if let Ok(resp) = self.consider_builtin_upcast_to_principal(
+ goal,
+ a_data,
+ a_region,
+ b_data,
+ b_region,
+ a_data.principal(),
+ ) {
+ responses.push((resp, BuiltinImplSource::Misc));
}
+ } else if let Some(a_principal) = a_data.principal() {
+ self.walk_vtable(
+ a_principal.with_self_ty(tcx, a_ty),
+ |ecx, new_a_principal, _, vtable_vptr_slot| {
+ if let Ok(resp) = ecx.probe_candidate("dyn upcast").enter(|ecx| {
+ ecx.consider_builtin_upcast_to_principal(
+ goal,
+ a_data,
+ a_region,
+ b_data,
+ b_region,
+ Some(new_a_principal.map_bound(|trait_ref| {
+ ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
+ })),
+ )
+ }) {
+ responses
+ .push((resp, BuiltinImplSource::TraitUpcasting { vtable_vptr_slot }));
+ }
+ },
+ );
}
responses
}
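A minimal sketch of the trait upcasting coercion these candidates cover, assuming the `trait_upcasting` nightly feature of this era (illustrative only):

```rust
#![feature(trait_upcasting)]

trait Super {}
trait Sub: Super {}

struct S;
impl Super for S {}
impl Sub for S {}

// Upcasting `dyn Sub` to its supertrait `dyn Super` walks the vtable to
// find the supertrait, as in `consider_builtin_dyn_upcast_candidates`.
fn upcast(x: &dyn Sub) -> &dyn Super {
    x
}

fn main() {
    let _super: &dyn Super = upcast(&S);
}
```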
- fn consider_builtin_discriminant_kind_candidate(
- ecx: &mut EvalCtxt<'_, 'tcx>,
- goal: Goal<'tcx, Self>,
+ fn consider_builtin_upcast_to_principal(
+ &mut self,
+ goal: Goal<'tcx, (Ty<'tcx>, Ty<'tcx>)>,
+ a_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
+ a_region: ty::Region<'tcx>,
+ b_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
+ b_region: ty::Region<'tcx>,
+ upcast_principal: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> QueryResult<'tcx> {
- if goal.predicate.polarity != ty::ImplPolarity::Positive {
- return Err(NoSolution);
+ let param_env = goal.param_env;
+
+ // More than one projection in a_ty's bounds may match the projection
+ // in b_ty's bound. Use this to first determine *which* apply without
+ // having any inference side-effects. We process obligations because
+ // unification may initially succeed due to deferred projection equality.
+ let projection_may_match =
+ |ecx: &mut Self,
+ source_projection: ty::PolyExistentialProjection<'tcx>,
+ target_projection: ty::PolyExistentialProjection<'tcx>| {
+ source_projection.item_def_id() == target_projection.item_def_id()
+ && ecx
+ .probe(|_| CandidateKind::UpcastProbe)
+ .enter(|ecx| -> Result<(), NoSolution> {
+ ecx.eq(param_env, source_projection, target_projection)?;
+ let _ = ecx.try_evaluate_added_goals()?;
+ Ok(())
+ })
+ .is_ok()
+ };
+
+ for bound in b_data {
+ match bound.skip_binder() {
+ // Check that a's supertrait (upcast_principal) is compatible
+ // with the target (b_ty).
+ ty::ExistentialPredicate::Trait(target_principal) => {
+ self.eq(param_env, upcast_principal.unwrap(), bound.rebind(target_principal))?;
+ }
+ // Check that b_ty's projection is satisfied by exactly one of
+ // a_ty's projections. First, we look through the list to see if
+ // any match. If not, error. Then, if *more* than one matches, we
+ // return ambiguity. Otherwise, if exactly one matches, equate
+ // it with b_ty's projection.
+ ty::ExistentialPredicate::Projection(target_projection) => {
+ let target_projection = bound.rebind(target_projection);
+ let mut matching_projections =
+ a_data.projection_bounds().filter(|source_projection| {
+ projection_may_match(self, *source_projection, target_projection)
+ });
+ let Some(source_projection) = matching_projections.next() else {
+ return Err(NoSolution);
+ };
+ if matching_projections.next().is_some() {
+ return self.evaluate_added_goals_and_make_canonical_response(
+ Certainty::AMBIGUOUS,
+ );
+ }
+ self.eq(param_env, source_projection, target_projection)?;
+ }
+ // Check that b_ty's auto traits are present in a_ty's bounds.
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ if !a_data.auto_traits().any(|source_def_id| source_def_id == def_id) {
+ return Err(NoSolution);
+ }
+ }
+ }
}
- // `DiscriminantKind` is automatically implemented for every type.
- ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
+ // Also require that a_ty's lifetime outlives b_ty's lifetime.
+ self.add_goal(Goal::new(
+ self.tcx(),
+ param_env,
+ ty::Binder::dummy(ty::OutlivesPredicate(a_region, b_region)),
+ ));
+
+ self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}
- fn consider_builtin_destruct_candidate(
- ecx: &mut EvalCtxt<'_, 'tcx>,
- goal: Goal<'tcx, Self>,
+ /// We have the following builtin impls for arrays:
+ /// ```ignore (builtin impl example)
+ /// impl<T: ?Sized, const N: usize> Unsize<[T]> for [T; N] {}
+ /// ```
+ /// While the impl itself could theoretically not be builtin,
+ /// the actual unsizing behavior is builtin. It's also easier to
+ /// make all impls of `Unsize` builtin as we're able to use
+ /// `#[rustc_deny_explicit_impl]` in this case.
+ fn consider_builtin_array_unsize(
+ &mut self,
+ goal: Goal<'tcx, (Ty<'tcx>, Ty<'tcx>)>,
+ a_elem_ty: Ty<'tcx>,
+ b_elem_ty: Ty<'tcx>,
) -> QueryResult<'tcx> {
- if goal.predicate.polarity != ty::ImplPolarity::Positive {
- return Err(NoSolution);
- }
-
- if !goal.param_env.is_const() {
- // `Destruct` is automatically implemented for every type in
- // non-const environments.
- ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
- } else {
- // FIXME(-Ztrait-solver=next): Implement this when we get const working in the new solver
- Err(NoSolution)
- }
+ self.eq(goal.param_env, a_elem_ty, b_elem_ty)?;
+ self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}
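The builtin array impl above corresponds to the everyday `[T; N]` -> `[T]` coercion; a minimal sketch:

```rust
fn main() {
    let array: [i32; 3] = [1, 2, 3];
    // The element types are equated and the length is erased, matching
    // `consider_builtin_array_unsize`.
    let slice: &[i32] = &array;
    assert_eq!(slice.len(), 3);
}
```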
- fn consider_builtin_transmute_candidate(
- ecx: &mut EvalCtxt<'_, 'tcx>,
- goal: Goal<'tcx, Self>,
+ /// We generate a builtin `Unsize` impl for structs with generic parameters only
+ /// mentioned by the last field.
+ /// ```ignore (builtin impl example)
+ /// struct Foo<T, U: ?Sized> {
+ /// sized_field: Vec<T>,
+ /// unsizable: Box<U>,
+ /// }
+ /// // results in the following builtin impl
+ /// impl<T: ?Sized, U: ?Sized, V: ?Sized> Unsize<Foo<T, V>> for Foo<T, U>
+ /// where
+ /// Box<U>: Unsize<Box<V>>,
+ /// {}
+ /// ```
+ fn consider_builtin_struct_unsize(
+ &mut self,
+ goal: Goal<'tcx, (Ty<'tcx>, Ty<'tcx>)>,
+ def: ty::AdtDef<'tcx>,
+ a_args: ty::GenericArgsRef<'tcx>,
+ b_args: ty::GenericArgsRef<'tcx>,
) -> QueryResult<'tcx> {
- if goal.predicate.polarity != ty::ImplPolarity::Positive {
- return Err(NoSolution);
- }
+ let tcx = self.tcx();
+ let Goal { predicate: (_a_ty, b_ty), .. } = goal;
- // `rustc_transmute` does not have support for type or const params
- if goal.has_non_region_placeholders() {
+ let unsizing_params = tcx.unsizing_params_for_adt(def.did());
+ // We must be unsizing some type parameters. This also implies
+ // that the struct has a tail field.
+ if unsizing_params.is_empty() {
return Err(NoSolution);
}
- // Erase regions because we compute layouts in `rustc_transmute`,
- // which will ICE for region vars.
- let substs = ecx.tcx().erase_regions(goal.predicate.trait_ref.substs);
+ let tail_field = def.non_enum_variant().tail();
+ let tail_field_ty = tcx.type_of(tail_field.did);
+
+ let a_tail_ty = tail_field_ty.instantiate(tcx, a_args);
+ let b_tail_ty = tail_field_ty.instantiate(tcx, b_args);
+
+ // Substitute just the unsizing params from B into A. The type after
+ // this substitution must be equal to B. This is so we don't unsize
+ // unrelated type parameters.
+ let new_a_args = tcx.mk_args_from_iter(
+ a_args
+ .iter()
+ .enumerate()
+ .map(|(i, a)| if unsizing_params.contains(i as u32) { b_args[i] } else { a }),
+ );
+ let unsized_a_ty = Ty::new_adt(tcx, def, new_a_args);
+
+ // Finally, we require that `TailA: Unsize<TailB>` for the tail field
+ // types.
+ self.eq(goal.param_env, unsized_a_ty, b_ty)?;
+ self.add_goal(goal.with(
+ tcx,
+ ty::TraitRef::new(
+ tcx,
+ tcx.lang_items().unsize_trait().unwrap(),
+ [a_tail_ty, b_tail_ty],
+ ),
+ ));
+ self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
+ }
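A stable-Rust sketch of the struct unsizing this candidate implements; the struct name and fields are illustrative:

```rust
// Only the last field mentions the unsizable parameter, so the builtin
// impl described in the doc comment above applies.
struct Buffer<T: ?Sized> {
    len_hint: usize,
    data: T,
}

fn main() {
    let sized: Box<Buffer<[u8; 4]>> = Box::new(Buffer { len_hint: 4, data: [0u8; 4] });
    // `Buffer<[u8; 4]>` unsizes to `Buffer<[u8]>` because `[u8; 4]: Unsize<[u8]>`.
    let dynamic: Box<Buffer<[u8]>> = sized;
    assert_eq!(dynamic.data.len(), dynamic.len_hint);
}
```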
- let Some(assume) = rustc_transmute::Assume::from_const(
- ecx.tcx(),
- goal.param_env,
- substs.const_at(3),
- ) else {
- return Err(NoSolution);
- };
+ /// We generate the following builtin impl for tuples of all sizes.
+ ///
+ /// This impl is still unstable and we emit a feature error when it
+ /// is used by a coercion.
+ /// ```ignore (builtin impl example)
+ /// impl<T: ?Sized, U: ?Sized, V: ?Sized> Unsize<(T, V)> for (T, U)
+ /// where
+ /// U: Unsize<V>,
+ /// {}
+ /// ```
+ fn consider_builtin_tuple_unsize(
+ &mut self,
+ goal: Goal<'tcx, (Ty<'tcx>, Ty<'tcx>)>,
+ a_tys: &'tcx ty::List<Ty<'tcx>>,
+ b_tys: &'tcx ty::List<Ty<'tcx>>,
+ ) -> QueryResult<'tcx> {
+ let tcx = self.tcx();
+ let Goal { predicate: (_a_ty, b_ty), .. } = goal;
- let certainty = ecx.is_transmutable(
- rustc_transmute::Types { dst: substs.type_at(0), src: substs.type_at(1) },
- substs.type_at(2),
- assume,
- )?;
- ecx.evaluate_added_goals_and_make_canonical_response(certainty)
+ let (&a_last_ty, a_rest_tys) = a_tys.split_last().unwrap();
+ let &b_last_ty = b_tys.last().unwrap();
+
+ // Substitute just the tail field of B, and require that they're equal.
+ let unsized_a_ty =
+ Ty::new_tup_from_iter(tcx, a_rest_tys.iter().copied().chain([b_last_ty]));
+ self.eq(goal.param_env, unsized_a_ty, b_ty)?;
+
+ // Similar to ADTs, require that we can unsize the tail.
+ self.add_goal(goal.with(
+ tcx,
+ ty::TraitRef::new(
+ tcx,
+ tcx.lang_items().unsize_trait().unwrap(),
+ [a_last_ty, b_last_ty],
+ ),
+ ));
+ self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}
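Tuple unsizing is still feature-gated, as the doc comment notes; a sketch assuming the `unsized_tuple_coercion` nightly feature (illustrative only):

```rust
#![feature(unsized_tuple_coercion)]

fn main() {
    let tuple: &(u8, [i32; 3]) = &(1, [1, 2, 3]);
    // Only the last element is unsized; the leading elements must stay equal.
    let unsized_tuple: &(u8, [i32]) = tuple;
    assert_eq!(unsized_tuple.1.len(), 3);
}
```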
-}
-impl<'tcx> EvalCtxt<'_, 'tcx> {
// Return `Some` if there is an impl (built-in or user provided) that may
// hold for the self type of the goal, which for coherence and soundness
// purposes must disqualify the built-in auto impl assembled by considering
@@ -689,7 +885,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
| ty::Tuple(_)
| ty::Adt(_, _)
// FIXME: Handling opaques here is kinda sus. Especially because we
- // simplify them to PlaceholderSimplifiedType.
+ // simplify them to SimplifiedType::Placeholder.
| ty::Alias(ty::Opaque, _) => {
let mut disqualifying_impl = None;
self.tcx().for_each_relevant_impl_treating_projections(
@@ -726,12 +922,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
ecx.add_goals(
constituent_tys(ecx, goal.predicate.self_ty())?
.into_iter()
- .map(|ty| {
- goal.with(
- ecx.tcx(),
- ty::Binder::dummy(goal.predicate.with_self_ty(ecx.tcx(), ty)),
- )
- })
+ .map(|ty| goal.with(ecx.tcx(), goal.predicate.with_self_ty(ecx.tcx(), ty)))
.collect::<Vec<_>>(),
);
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
diff --git a/compiler/rustc_trait_selection/src/solve/weak_types.rs b/compiler/rustc_trait_selection/src/solve/weak_types.rs
index b095b54c5..54de32cf6 100644
--- a/compiler/rustc_trait_selection/src/solve/weak_types.rs
+++ b/compiler/rustc_trait_selection/src/solve/weak_types.rs
@@ -1,3 +1,8 @@
+//! Computes a normalizes-to (projection) goal for inherent associated types,
+//! `#![feature(lazy_type_alias)]` and `#![feature(type_alias_impl_trait)]`.
+//!
+//! Since a weak alias is not ambiguous, this just computes the `type_of` of
+//! the alias and registers the where-clauses of the type alias.
use rustc_middle::traits::solve::{Certainty, Goal, QueryResult};
use rustc_middle::ty;
@@ -12,8 +17,18 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
let weak_ty = goal.predicate.projection_ty;
let expected = goal.predicate.term.ty().expect("no such thing as a const alias");
- let actual = tcx.type_of(weak_ty.def_id).subst(tcx, weak_ty.substs);
+ let actual = tcx.type_of(weak_ty.def_id).instantiate(tcx, weak_ty.args);
self.eq(goal.param_env, expected, actual)?;
+
+ // Check where clauses
+ self.add_goals(
+ tcx.predicates_of(weak_ty.def_id)
+ .instantiate(tcx, weak_ty.args)
+ .predicates
+ .into_iter()
+ .map(|pred| goal.with(tcx, pred)),
+ );
+
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}
}
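The added where-clause check applies to weak (lazy) type aliases; a sketch of such an alias, assuming the `lazy_type_alias` incomplete feature (illustrative only):

```rust
#![feature(lazy_type_alias)]
#![allow(incomplete_features)]

// Normalizing `Alias<T>` now also registers its predicates (here `T: Clone`)
// as goals, per the hunk above.
type Alias<T: Clone> = Vec<T>;

fn main() {
    let v: Alias<u8> = vec![1, 2, 3];
    assert_eq!(v.len(), 3);
}
```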
diff --git a/compiler/rustc_trait_selection/src/traits/auto_trait.rs b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
index cb38d0ac8..ba5000da6 100644
--- a/compiler/rustc_trait_selection/src/traits/auto_trait.rs
+++ b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
@@ -97,7 +97,6 @@ impl<'tcx> AutoTraitFinder<'tcx> {
orig_env,
ty::TraitPredicate {
trait_ref,
- constness: ty::BoundConstness::NotConst,
polarity: if polarity {
ImplPolarity::Positive
} else {
@@ -152,21 +151,16 @@ impl<'tcx> AutoTraitFinder<'tcx> {
// traits::project will see that 'T: SomeTrait' is in our ParamEnv, allowing
// SelectionContext to return it back to us.
- let Some((new_env, user_env)) = self.evaluate_predicates(
- &infcx,
- trait_did,
- ty,
- orig_env,
- orig_env,
- &mut fresh_preds,
- ) else {
+ let Some((new_env, user_env)) =
+ self.evaluate_predicates(&infcx, trait_did, ty, orig_env, orig_env, &mut fresh_preds)
+ else {
return AutoTraitResult::NegativeImpl;
};
let (full_env, full_user_env) = self
.evaluate_predicates(&infcx, trait_did, ty, new_env, user_env, &mut fresh_preds)
.unwrap_or_else(|| {
- panic!("Failed to fully process: {:?} {:?} {:?}", ty, trait_did, orig_env)
+ panic!("Failed to fully process: {ty:?} {trait_did:?} {orig_env:?}")
});
debug!(
@@ -183,7 +177,7 @@ impl<'tcx> AutoTraitFinder<'tcx> {
ocx.register_bound(ObligationCause::dummy(), full_env, ty, trait_did);
let errors = ocx.select_all_or_error();
if !errors.is_empty() {
- panic!("Unable to fulfill trait {:?} for '{:?}': {:?}", trait_did, ty, errors);
+ panic!("Unable to fulfill trait {trait_did:?} for '{ty:?}': {errors:?}");
}
let outlives_env = OutlivesEnvironment::new(full_env);
@@ -265,7 +259,6 @@ impl<'tcx> AutoTraitFinder<'tcx> {
predicates.push_back(ty::Binder::dummy(ty::TraitPredicate {
trait_ref: ty::TraitRef::new(infcx.tcx, trait_did, [ty]),
- constness: ty::BoundConstness::NotConst,
// Auto traits are positive
polarity: ty::ImplPolarity::Positive,
}));
@@ -329,7 +322,7 @@ impl<'tcx> AutoTraitFinder<'tcx> {
}
Ok(None) => {}
Err(SelectionError::Unimplemented) => {
- if self.is_param_no_infer(pred.skip_binder().trait_ref.substs) {
+ if self.is_param_no_infer(pred.skip_binder().trait_ref.args) {
already_visited.remove(&pred);
self.add_user_pred(&mut user_computed_preds, pred.to_predicate(self.tcx));
predicates.push_back(pred);
@@ -339,12 +332,12 @@ impl<'tcx> AutoTraitFinder<'tcx> {
{:?} {:?} {:?}",
ty,
pred,
- pred.skip_binder().trait_ref.substs
+ pred.skip_binder().trait_ref.args
);
return None;
}
}
- _ => panic!("Unexpected error for '{:?}': {:?}", ty, result),
+ _ => panic!("Unexpected error for '{ty:?}': {result:?}"),
};
let normalized_preds =
@@ -352,14 +345,12 @@ impl<'tcx> AutoTraitFinder<'tcx> {
new_env = ty::ParamEnv::new(
tcx.mk_clauses_from_iter(normalized_preds.filter_map(|p| p.as_clause())),
param_env.reveal(),
- param_env.constness(),
);
}
let final_user_env = ty::ParamEnv::new(
tcx.mk_clauses_from_iter(user_computed_preds.into_iter().filter_map(|p| p.as_clause())),
user_env.reveal(),
- user_env.constness(),
);
debug!(
"evaluate_nested_obligations(ty={:?}, trait_did={:?}): succeeded with '{:?}' \
@@ -406,17 +397,17 @@ impl<'tcx> AutoTraitFinder<'tcx> {
) = (new_pred.kind().skip_binder(), old_pred.kind().skip_binder())
{
if new_trait.def_id() == old_trait.def_id() {
- let new_substs = new_trait.trait_ref.substs;
- let old_substs = old_trait.trait_ref.substs;
+ let new_args = new_trait.trait_ref.args;
+ let old_args = old_trait.trait_ref.args;
- if !new_substs.types().eq(old_substs.types()) {
+ if !new_args.types().eq(old_args.types()) {
// We can't compare lifetimes if the types are different,
// so skip checking `old_pred`.
return true;
}
for (new_region, old_region) in
- iter::zip(new_substs.regions(), old_substs.regions())
+ iter::zip(new_args.regions(), old_args.regions())
{
match (*new_region, *old_region) {
// If both predicates have an `ReLateBound` (a HRTB) in the
@@ -569,8 +560,8 @@ impl<'tcx> AutoTraitFinder<'tcx> {
finished_map
}
- fn is_param_no_infer(&self, substs: SubstsRef<'_>) -> bool {
- self.is_of_param(substs.type_at(0)) && !substs.types().any(|t| t.has_infer_types())
+ fn is_param_no_infer(&self, args: GenericArgsRef<'_>) -> bool {
+ self.is_of_param(args.type_at(0)) && !args.types().any(|t| t.has_infer_types())
}
pub fn is_of_param(&self, ty: Ty<'_>) -> bool {
@@ -641,7 +632,7 @@ impl<'tcx> AutoTraitFinder<'tcx> {
// an inference variable.
// Additionally, we check if we've seen this predicate before,
// to avoid rendering duplicate bounds to the user.
- if self.is_param_no_infer(p.skip_binder().projection_ty.substs)
+ if self.is_param_no_infer(p.skip_binder().projection_ty.args)
&& !p.term().skip_binder().has_infer_types()
&& is_new_pred
{
@@ -754,7 +745,7 @@ impl<'tcx> AutoTraitFinder<'tcx> {
// subobligations or getting an error) when we started off with
// inference variables
if p.term().skip_binder().has_infer_types() {
- panic!("Unexpected result when selecting {:?} {:?}", ty, obligation)
+ panic!("Unexpected result when selecting {ty:?} {obligation:?}")
}
}
}
diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs
index 1b1285e1b..5746781ae 100644
--- a/compiler/rustc_trait_selection/src/traits/coherence.rs
+++ b/compiler/rustc_trait_selection/src/traits/coherence.rs
@@ -7,7 +7,7 @@
use crate::infer::outlives::env::OutlivesEnvironment;
use crate::infer::InferOk;
use crate::traits::outlives_bounds::InferCtxtExt as _;
-use crate::traits::select::IntercrateAmbiguityCause;
+use crate::traits::select::{IntercrateAmbiguityCause, TreatInductiveCycleAs};
use crate::traits::util::impl_subject_and_oblig;
use crate::traits::SkipLeakCheck;
use crate::traits::{
@@ -24,6 +24,7 @@ use rustc_middle::traits::DefiningAnchor;
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
use rustc_middle::ty::visit::{TypeVisitable, TypeVisitableExt};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitor};
+use rustc_session::lint::builtin::COINDUCTIVE_OVERLAP_IN_COHERENCE;
use rustc_span::symbol::sym;
use rustc_span::DUMMY_SP;
use std::fmt::Debug;
@@ -96,9 +97,7 @@ pub fn overlapping_impls(
let impl1_ref = tcx.impl_trait_ref(impl1_def_id);
let impl2_ref = tcx.impl_trait_ref(impl2_def_id);
let may_overlap = match (impl1_ref, impl2_ref) {
- (Some(a), Some(b)) => {
- drcx.substs_refs_may_unify(a.skip_binder().substs, b.skip_binder().substs)
- }
+ (Some(a), Some(b)) => drcx.args_refs_may_unify(a.skip_binder().args, b.skip_binder().args),
(None, None) => {
let self_ty1 = tcx.type_of(impl1_def_id).skip_binder();
let self_ty2 = tcx.type_of(impl2_def_id).skip_binder();
@@ -143,24 +142,26 @@ fn with_fresh_ty_vars<'cx, 'tcx>(
impl_def_id: DefId,
) -> ty::ImplHeader<'tcx> {
let tcx = selcx.tcx();
- let impl_substs = selcx.infcx.fresh_substs_for_item(DUMMY_SP, impl_def_id);
+ let impl_args = selcx.infcx.fresh_args_for_item(DUMMY_SP, impl_def_id);
let header = ty::ImplHeader {
impl_def_id,
- self_ty: tcx.type_of(impl_def_id).subst(tcx, impl_substs),
- trait_ref: tcx.impl_trait_ref(impl_def_id).map(|i| i.subst(tcx, impl_substs)),
+ self_ty: tcx.type_of(impl_def_id).instantiate(tcx, impl_args),
+ trait_ref: tcx.impl_trait_ref(impl_def_id).map(|i| i.instantiate(tcx, impl_args)),
predicates: tcx
.predicates_of(impl_def_id)
- .instantiate(tcx, impl_substs)
+ .instantiate(tcx, impl_args)
.iter()
- .map(|(c, _)| c.as_predicate())
+ .map(|(c, s)| (c.as_predicate(), s))
.collect(),
};
- let InferOk { value: mut header, obligations } =
- selcx.infcx.at(&ObligationCause::dummy(), param_env).normalize(header);
+ let InferOk { value: mut header, obligations } = selcx
+ .infcx
+ .at(&ObligationCause::dummy_with_span(tcx.def_span(impl_def_id)), param_env)
+ .normalize(header);
- header.predicates.extend(obligations.into_iter().map(|o| o.predicate));
+ header.predicates.extend(obligations.into_iter().map(|o| (o.predicate, o.cause.span)));
header
}
@@ -209,16 +210,76 @@ fn overlap<'tcx>(
let equate_obligations = equate_impl_headers(selcx.infcx, &impl1_header, &impl2_header)?;
debug!("overlap: unification check succeeded");
- if overlap_mode.use_implicit_negative()
- && impl_intersection_has_impossible_obligation(
- selcx,
- param_env,
- &impl1_header,
- impl2_header,
- equate_obligations,
- )
- {
- return None;
+ if overlap_mode.use_implicit_negative() {
+ for mode in [TreatInductiveCycleAs::Ambig, TreatInductiveCycleAs::Recur] {
+ if let Some(failing_obligation) = selcx.with_treat_inductive_cycle_as(mode, |selcx| {
+ impl_intersection_has_impossible_obligation(
+ selcx,
+ param_env,
+ &impl1_header,
+ &impl2_header,
+ &equate_obligations,
+ )
+ }) {
+ if matches!(mode, TreatInductiveCycleAs::Recur) {
+ let first_local_impl = impl1_header
+ .impl_def_id
+ .as_local()
+ .or(impl2_header.impl_def_id.as_local())
+ .expect("expected one of the impls to be local");
+ infcx.tcx.struct_span_lint_hir(
+ COINDUCTIVE_OVERLAP_IN_COHERENCE,
+ infcx.tcx.local_def_id_to_hir_id(first_local_impl),
+ infcx.tcx.def_span(first_local_impl),
+ format!(
+ "implementations {} will conflict in the future",
+ match impl1_header.trait_ref {
+ Some(trait_ref) => {
+ let trait_ref = infcx.resolve_vars_if_possible(trait_ref);
+ format!(
+ "of `{}` for `{}`",
+ trait_ref.print_only_trait_path(),
+ trait_ref.self_ty()
+ )
+ }
+ None => format!(
+ "for `{}`",
+ infcx.resolve_vars_if_possible(impl1_header.self_ty)
+ ),
+ },
+ ),
+ |lint| {
+ lint.note(
+ "impls that are not considered to overlap may be considered to \
+ overlap in the future",
+ )
+ .span_label(
+ infcx.tcx.def_span(impl1_header.impl_def_id),
+ "the first impl is here",
+ )
+ .span_label(
+ infcx.tcx.def_span(impl2_header.impl_def_id),
+ "the second impl is here",
+ );
+ if !failing_obligation.cause.span.is_dummy() {
+ lint.span_label(
+ failing_obligation.cause.span,
+ format!(
+ "`{}` may be considered to hold in future releases, \
+ causing the impls to overlap",
+ infcx
+ .resolve_vars_if_possible(failing_obligation.predicate)
+ ),
+ );
+ }
+ lint
+ },
+ );
+ }
+
+ return None;
+ }
+ }
}
// We toggle the `leak_check` by using `skip_leak_check` when constructing the
@@ -286,40 +347,30 @@ fn impl_intersection_has_impossible_obligation<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
impl1_header: &ty::ImplHeader<'tcx>,
- impl2_header: ty::ImplHeader<'tcx>,
- obligations: PredicateObligations<'tcx>,
-) -> bool {
+ impl2_header: &ty::ImplHeader<'tcx>,
+ obligations: &PredicateObligations<'tcx>,
+) -> Option<PredicateObligation<'tcx>> {
let infcx = selcx.infcx;
- let obligation_guaranteed_to_fail = move |obligation: &PredicateObligation<'tcx>| {
- if infcx.next_trait_solver() {
- infcx.evaluate_obligation(obligation).map_or(false, |result| !result.may_apply())
- } else {
- // We use `evaluate_root_obligation` to correctly track
- // intercrate ambiguity clauses. We do not need this in the
- // new solver.
- selcx.evaluate_root_obligation(obligation).map_or(
- false, // Overflow has occurred, and treat the obligation as possibly holding.
- |result| !result.may_apply(),
- )
- }
- };
-
- let opt_failing_obligation = [&impl1_header.predicates, &impl2_header.predicates]
+ [&impl1_header.predicates, &impl2_header.predicates]
.into_iter()
.flatten()
- .map(|&predicate| {
- Obligation::new(infcx.tcx, ObligationCause::dummy(), param_env, predicate)
+ .map(|&(predicate, span)| {
+ Obligation::new(infcx.tcx, ObligationCause::dummy_with_span(span), param_env, predicate)
+ })
+ .chain(obligations.into_iter().cloned())
+ .find(|obligation: &PredicateObligation<'tcx>| {
+ if infcx.next_trait_solver() {
+ infcx.evaluate_obligation(obligation).map_or(false, |result| !result.may_apply())
+ } else {
+ // We use `evaluate_root_obligation` to correctly track intercrate
+ // ambiguity clauses. We cannot use this in the new solver.
+ selcx.evaluate_root_obligation(obligation).map_or(
+ false, // Overflow has occurred, and treat the obligation as possibly holding.
+ |result| !result.may_apply(),
+ )
+ }
})
- .chain(obligations)
- .find(obligation_guaranteed_to_fail);
-
- if let Some(failing_obligation) = opt_failing_obligation {
- debug!("overlap: obligation unsatisfiable {:?}", failing_obligation);
- true
- } else {
- false
- }
}
/// Check if both impls can be satisfied by a common type by considering whether
@@ -353,13 +404,13 @@ fn impl_intersection_has_negative_obligation(
&infcx,
ObligationCause::dummy(),
impl_env,
- tcx.impl_subject(impl1_def_id).subst_identity(),
+ tcx.impl_subject(impl1_def_id).instantiate_identity(),
) {
Ok(s) => s,
Err(err) => {
tcx.sess.delay_span_bug(
tcx.def_span(impl1_def_id),
- format!("failed to fully normalize {:?}: {:?}", impl1_def_id, err),
+ format!("failed to fully normalize {impl1_def_id:?}: {err:?}"),
);
return false;
}
@@ -367,16 +418,16 @@ fn impl_intersection_has_negative_obligation(
// Attempt to prove that impl2 applies, given all of the above.
let selcx = &mut SelectionContext::new(&infcx);
- let impl2_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl2_def_id);
+ let impl2_args = infcx.fresh_args_for_item(DUMMY_SP, impl2_def_id);
let (subject2, normalization_obligations) =
- impl_subject_and_oblig(selcx, impl_env, impl2_def_id, impl2_substs, |_, _| {
+ impl_subject_and_oblig(selcx, impl_env, impl2_def_id, impl2_args, |_, _| {
ObligationCause::dummy()
});
// do the impls unify? If not, then it's not currently possible to prove any
// obligations about their intersection.
let Ok(InferOk { obligations: equate_obligations, .. }) =
- infcx.at(&ObligationCause::dummy(), impl_env).eq(DefineOpaqueTypes::No,subject1, subject2)
+ infcx.at(&ObligationCause::dummy(), impl_env).eq(DefineOpaqueTypes::No, subject1, subject2)
else {
debug!("explicit_disjoint: {:?} does not unify with {:?}", subject1, subject2);
return false;
@@ -437,8 +488,7 @@ fn prove_negated_obligation<'tcx>(
let body_def_id = body_def_id.as_local().unwrap_or(CRATE_DEF_ID);
let ocx = ObligationCtxt::new(&infcx);
- let Ok(wf_tys) = ocx.assumed_wf_types(param_env, body_def_id)
- else {
+ let Ok(wf_tys) = ocx.assumed_wf_types(param_env, body_def_id) else {
return false;
};
@@ -455,22 +505,23 @@ fn prove_negated_obligation<'tcx>(
/// This both checks whether any downstream or sibling crates could
/// implement it and whether an upstream crate can add this impl
/// without breaking backwards compatibility.
-#[instrument(level = "debug", skip(tcx), ret)]
-pub fn trait_ref_is_knowable<'tcx>(
+#[instrument(level = "debug", skip(tcx, lazily_normalize_ty), ret)]
+pub fn trait_ref_is_knowable<'tcx, E: Debug>(
tcx: TyCtxt<'tcx>,
trait_ref: ty::TraitRef<'tcx>,
-) -> Result<(), Conflict> {
+ mut lazily_normalize_ty: impl FnMut(Ty<'tcx>) -> Result<Ty<'tcx>, E>,
+) -> Result<Result<(), Conflict>, E> {
if Some(trait_ref.def_id) == tcx.lang_items().fn_ptr_trait() {
// The only types implementing `FnPtr` are function pointers,
// so if there's no impl of `FnPtr` in the current crate,
// then such an impl will never be added in the future.
- return Ok(());
+ return Ok(Ok(()));
}
- if orphan_check_trait_ref(trait_ref, InCrate::Remote).is_ok() {
+ if orphan_check_trait_ref(trait_ref, InCrate::Remote, &mut lazily_normalize_ty)?.is_ok() {
// A downstream or cousin crate is allowed to implement some
// substitution of this trait-ref.
- return Err(Conflict::Downstream);
+ return Ok(Err(Conflict::Downstream));
}
if trait_ref_is_local_or_fundamental(tcx, trait_ref) {
@@ -479,7 +530,7 @@ pub fn trait_ref_is_knowable<'tcx>(
// allowed to implement a substitution of this trait ref, which
// means impls could only come from dependencies of this crate,
// which we already know about.
- return Ok(());
+ return Ok(Ok(()));
}
// This is a remote non-fundamental trait, so if another crate
@@ -490,10 +541,10 @@ pub fn trait_ref_is_knowable<'tcx>(
// and if we are an intermediate owner, then we don't care
// about future-compatibility, which means that we're OK if
// we are an owner.
- if orphan_check_trait_ref(trait_ref, InCrate::Local).is_ok() {
- Ok(())
+ if orphan_check_trait_ref(trait_ref, InCrate::Local, &mut lazily_normalize_ty)?.is_ok() {
+ Ok(Ok(()))
} else {
- Err(Conflict::Upstream)
+ Ok(Err(Conflict::Upstream))
}
}
@@ -520,7 +571,7 @@ pub enum OrphanCheckErr<'tcx> {
pub fn orphan_check(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Result<(), OrphanCheckErr<'_>> {
// We only expect this routine to be invoked on implementations
// of a trait, not inherent implementations.
- let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().subst_identity();
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity();
debug!(?trait_ref);
// If the *trait* is local to the crate, ok.
@@ -529,7 +580,7 @@ pub fn orphan_check(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Result<(), OrphanChe
return Ok(());
}
- orphan_check_trait_ref(trait_ref, InCrate::Local)
+ orphan_check_trait_ref::<!>(trait_ref, InCrate::Local, |ty| Ok(ty)).unwrap()
}
/// Checks whether a trait-ref is potentially implementable by a crate.
@@ -618,11 +669,12 @@ pub fn orphan_check(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Result<(), OrphanChe
///
/// Note that this function is never called for types that have both type
/// parameters and inference variables.
-#[instrument(level = "trace", ret)]
-fn orphan_check_trait_ref<'tcx>(
+#[instrument(level = "trace", skip(lazily_normalize_ty), ret)]
+fn orphan_check_trait_ref<'tcx, E: Debug>(
trait_ref: ty::TraitRef<'tcx>,
in_crate: InCrate,
-) -> Result<(), OrphanCheckErr<'tcx>> {
+ lazily_normalize_ty: impl FnMut(Ty<'tcx>) -> Result<Ty<'tcx>, E>,
+) -> Result<Result<(), OrphanCheckErr<'tcx>>, E> {
if trait_ref.has_infer() && trait_ref.has_param() {
bug!(
"can't orphan check a trait ref with both params and inference variables {:?}",
@@ -630,9 +682,10 @@ fn orphan_check_trait_ref<'tcx>(
);
}
- let mut checker = OrphanChecker::new(in_crate);
- match trait_ref.visit_with(&mut checker) {
+ let mut checker = OrphanChecker::new(in_crate, lazily_normalize_ty);
+ Ok(match trait_ref.visit_with(&mut checker) {
ControlFlow::Continue(()) => Err(OrphanCheckErr::NonLocalInputType(checker.non_local_tys)),
+ ControlFlow::Break(OrphanCheckEarlyExit::NormalizationFailure(err)) => return Err(err),
ControlFlow::Break(OrphanCheckEarlyExit::ParamTy(ty)) => {
// Does there exist some local type after the `ParamTy`.
checker.search_first_local_ty = true;
@@ -645,34 +698,39 @@ fn orphan_check_trait_ref<'tcx>(
}
}
ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(_)) => Ok(()),
- }
+ })
}
-struct OrphanChecker<'tcx> {
+struct OrphanChecker<'tcx, F> {
in_crate: InCrate,
in_self_ty: bool,
+ lazily_normalize_ty: F,
/// Ignore orphan check failures and exclusively search for the first
/// local type.
search_first_local_ty: bool,
non_local_tys: Vec<(Ty<'tcx>, bool)>,
}
-impl<'tcx> OrphanChecker<'tcx> {
- fn new(in_crate: InCrate) -> Self {
+impl<'tcx, F, E> OrphanChecker<'tcx, F>
+where
+ F: FnOnce(Ty<'tcx>) -> Result<Ty<'tcx>, E>,
+{
+ fn new(in_crate: InCrate, lazily_normalize_ty: F) -> Self {
OrphanChecker {
in_crate,
in_self_ty: true,
+ lazily_normalize_ty,
search_first_local_ty: false,
non_local_tys: Vec::new(),
}
}
- fn found_non_local_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<OrphanCheckEarlyExit<'tcx>> {
+ fn found_non_local_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<OrphanCheckEarlyExit<'tcx, E>> {
self.non_local_tys.push((t, self.in_self_ty));
ControlFlow::Continue(())
}
- fn found_param_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<OrphanCheckEarlyExit<'tcx>> {
+ fn found_param_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<OrphanCheckEarlyExit<'tcx, E>> {
if self.search_first_local_ty {
ControlFlow::Continue(())
} else {
@@ -688,18 +746,28 @@ impl<'tcx> OrphanChecker<'tcx> {
}
}
-enum OrphanCheckEarlyExit<'tcx> {
+enum OrphanCheckEarlyExit<'tcx, E> {
+ NormalizationFailure(E),
ParamTy(Ty<'tcx>),
LocalTy(Ty<'tcx>),
}
-impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OrphanChecker<'tcx> {
- type BreakTy = OrphanCheckEarlyExit<'tcx>;
+impl<'tcx, F, E> TypeVisitor<TyCtxt<'tcx>> for OrphanChecker<'tcx, F>
+where
+ F: FnMut(Ty<'tcx>) -> Result<Ty<'tcx>, E>,
+{
+ type BreakTy = OrphanCheckEarlyExit<'tcx, E>;
fn visit_region(&mut self, _r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
ControlFlow::Continue(())
}
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // Need to lazily normalize here with `-Ztrait-solver=next-coherence`.
+ let ty = match (self.lazily_normalize_ty)(ty) {
+ Ok(ty) => ty,
+ Err(err) => return ControlFlow::Break(OrphanCheckEarlyExit::NormalizationFailure(err)),
+ };
+
let result = match *ty.kind() {
ty::Bool
| ty::Char
@@ -729,11 +797,11 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OrphanChecker<'tcx> {
// For fundamental types, we just look inside of them.
ty::Ref(_, ty, _) => ty.visit_with(self),
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
if self.def_id_is_local(def.did()) {
ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty))
} else if def.is_fundamental() {
- substs.visit_with(self)
+ args.visit_with(self)
} else {
self.found_non_local_ty(ty)
}
diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
index 8dc13b827..3d0d3812d 100644
--- a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
+++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
@@ -191,7 +191,7 @@ fn satisfied_from_param_env<'tcx>(
if let ty::ConstKind::Expr(e) = c.kind() {
e.visit_with(self)
} else {
- // FIXME(generic_const_exprs): This doesn't recurse into `<T as Trait<U>>::ASSOC`'s substs.
+ // FIXME(generic_const_exprs): This doesn't recurse into `<T as Trait<U>>::ASSOC`'s args.
// This is currently unobservable as `<T as Trait<{ U + 1 }>>::ASSOC` creates an anon const
// with its own `ConstEvaluatable` bound in the param env which we will visit separately.
//
diff --git a/compiler/rustc_trait_selection/src/traits/engine.rs b/compiler/rustc_trait_selection/src/traits/engine.rs
index 61f693e1b..820973dc0 100644
--- a/compiler/rustc_trait_selection/src/traits/engine.rs
+++ b/compiler/rustc_trait_selection/src/traits/engine.rs
@@ -97,7 +97,7 @@ impl<'a, 'tcx> ObligationCtxt<'a, 'tcx> {
cause,
recursion_depth: 0,
param_env,
- predicate: ty::Binder::dummy(trait_ref).without_const().to_predicate(tcx),
+ predicate: ty::Binder::dummy(trait_ref).to_predicate(tcx),
});
}
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/ambiguity.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/ambiguity.rs
index f785c4eaf..fd813ca4e 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/ambiguity.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/ambiguity.rs
@@ -26,8 +26,8 @@ pub fn recompute_applicable_impls<'tcx>(
let obligation_trait_ref =
ocx.normalize(&ObligationCause::dummy(), param_env, placeholder_obligation.trait_ref);
- let impl_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl_def_id);
- let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().subst(tcx, impl_substs);
+ let impl_args = infcx.fresh_args_for_item(DUMMY_SP, impl_def_id);
+ let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().instantiate(tcx, impl_args);
let impl_trait_ref = ocx.normalize(&ObligationCause::dummy(), param_env, impl_trait_ref);
if let Err(_) =
@@ -36,7 +36,7 @@ pub fn recompute_applicable_impls<'tcx>(
return false;
}
- let impl_predicates = tcx.predicates_of(impl_def_id).instantiate(tcx, impl_substs);
+ let impl_predicates = tcx.predicates_of(impl_def_id).instantiate(tcx, impl_args);
ocx.register_obligations(impl_predicates.predicates.iter().map(|&predicate| {
Obligation::new(tcx, ObligationCause::dummy(), param_env, predicate)
}));
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
index f34218059..457d5420c 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
@@ -7,6 +7,7 @@ use super::{
ObligationCauseCode, ObligationCtxt, OutputTypeParameterMismatch, Overflow,
PredicateObligation, SelectionError, TraitNotObjectSafe,
};
+use crate::errors::{ClosureFnMutLabel, ClosureFnOnceLabel, ClosureKindMismatch};
use crate::infer::error_reporting::{TyCategory, TypeAnnotationNeeded as ErrorCode};
use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use crate::infer::{self, InferCtxt};
@@ -100,7 +101,6 @@ pub trait InferCtxtExt<'tcx> {
&self,
param_env: ty::ParamEnv<'tcx>,
ty: ty::Binder<'tcx, Ty<'tcx>>,
- constness: ty::BoundConstness,
polarity: ty::ImplPolarity,
) -> Result<(ty::ClosureKind, ty::Binder<'tcx, Ty<'tcx>>), ()>;
}
@@ -226,7 +226,7 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
let span = variant_data.ctor_hir_id().map_or(DUMMY_SP, |id| hir.span(id));
(span, None, vec![ArgKind::empty(); variant_data.fields().len()])
}
- _ => panic!("non-FnLike node found: {:?}", node),
+ _ => panic!("non-FnLike node found: {node:?}"),
})
}
@@ -273,10 +273,10 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
found_str,
);
- err.span_label(span, format!("expected {} that takes {}", kind, expected_str));
+ err.span_label(span, format!("expected {kind} that takes {expected_str}"));
if let Some(found_span) = found_span {
- err.span_label(found_span, format!("takes {}", found_str));
+ err.span_label(found_span, format!("takes {found_str}"));
// Suggest to take and ignore the arguments with expected_args_length `_`s if
// found arguments is empty (assume the user just wants to ignore args in this case).
@@ -289,7 +289,7 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
"consider changing the closure to take and ignore the expected argument{}",
pluralize!(expected_args.len())
),
- format!("|{}|", underscores),
+ format!("|{underscores}|"),
Applicability::MachineApplicable,
);
}
@@ -304,7 +304,7 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
err.span_suggestion_verbose(
found_span,
"change the closure to take multiple arguments instead of a single tuple",
- format!("|{}|", sugg),
+ format!("|{sugg}|"),
Applicability::MachineApplicable,
);
}
@@ -356,7 +356,6 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
&self,
param_env: ty::ParamEnv<'tcx>,
ty: ty::Binder<'tcx, Ty<'tcx>>,
- constness: ty::BoundConstness,
polarity: ty::ImplPolarity,
) -> Result<(ty::ClosureKind, ty::Binder<'tcx, Ty<'tcx>>), ()> {
self.commit_if_ok(|_| {
@@ -372,12 +371,13 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
span: DUMMY_SP,
kind: TypeVariableOriginKind::MiscVariable,
});
+ // FIXME(effects)
let trait_ref = ty::TraitRef::new(self.tcx, trait_def_id, [ty.skip_binder(), var]);
let obligation = Obligation::new(
self.tcx,
ObligationCause::dummy(),
param_env,
- ty.rebind(ty::TraitPredicate { trait_ref, constness, polarity }),
+ ty.rebind(ty::TraitPredicate { trait_ref, polarity }),
);
let ocx = ObligationCtxt::new(self);
ocx.register_obligation(obligation);
@@ -687,11 +687,10 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
match bound_predicate.skip_binder() {
ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_predicate)) => {
let trait_predicate = bound_predicate.rebind(trait_predicate);
- let mut trait_predicate = self.resolve_vars_if_possible(trait_predicate);
+ let trait_predicate = self.resolve_vars_if_possible(trait_predicate);
- trait_predicate.remap_constness_diag(obligation.param_env);
- let predicate_is_const = ty::BoundConstness::ConstIfConst
- == trait_predicate.skip_binder().constness;
+ // FIXME(effects)
+ let predicate_is_const = false;
if self.tcx.sess.has_errors().is_some()
&& trait_predicate.references_error()
@@ -704,9 +703,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
.get_parent_trait_ref(obligation.cause.code())
.map(|(t, s)| {
(
- format!(" in `{}`", t),
- format!("within `{}`, ", t),
- s.map(|s| (format!("within this `{}`", t), s)),
+ format!(" in `{t}`"),
+ format!("within `{t}`, "),
+ s.map(|s| (format!("within this `{t}`"), s)),
)
})
.unwrap_or_default();
@@ -1050,8 +1049,8 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
report_object_safety_error(self.tcx, span, trait_def_id, violations)
}
- ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
- let found_kind = self.closure_kind(closure_substs).unwrap();
+ ty::PredicateKind::ClosureKind(closure_def_id, closure_args, kind) => {
+ let found_kind = self.closure_kind(closure_args).unwrap();
self.report_closure_error(&obligation, closure_def_id, found_kind, kind)
}
@@ -1071,7 +1070,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// which bounds actually failed to hold.
self.tcx.sess.struct_span_err(
span,
- format!("the type `{}` is not well-formed", ty),
+ format!("the type `{ty}` is not well-formed"),
)
}
}
@@ -1109,7 +1108,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(ct, ty)) => {
let mut diag = self.tcx.sess.struct_span_err(
span,
- format!("the constant `{}` is not of type `{}`", ct, ty),
+ format!("the constant `{ct}` is not of type `{ty}`"),
);
self.note_type_err(
&mut diag,
@@ -1627,19 +1626,21 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
ty::TermKind::Ty(_) => Ty::new_projection(
self.tcx,
data.projection_ty.def_id,
- data.projection_ty.substs,
+ data.projection_ty.args,
)
.into(),
ty::TermKind::Const(ct) => ty::Const::new_unevaluated(
self.tcx,
ty::UnevaluatedConst {
def: data.projection_ty.def_id,
- substs: data.projection_ty.substs,
+ args: data.projection_ty.args,
},
ct.ty(),
)
.into(),
};
+ // FIXME(-Ztrait-solver=next): For diagnostic purposes, it would be nice
+ // to deeply normalize this type.
let normalized_term =
ocx.normalize(&obligation.cause, obligation.param_env, unnormalized_term);
@@ -1908,9 +1909,6 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
.all_impls(trait_pred.def_id())
.filter_map(|def_id| {
if self.tcx.impl_polarity(def_id) == ty::ImplPolarity::Negative
- || !trait_pred
- .skip_binder()
- .is_constness_satisfied_by(self.tcx.constness(def_id))
|| !self.tcx.is_user_visible_dep(def_id.krate)
{
return None;
@@ -1970,7 +1968,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
traits.sort();
traits.dedup();
// FIXME: this could use a better heuristic, like just checking
- // that substs[1..] is the same.
+ // that args[1..] is the same.
let all_traits_equal = traits.len() == 1;
let candidates: Vec<String> = candidates
@@ -1979,7 +1977,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
if all_traits_equal {
format!("\n {}", c.self_ty())
} else {
- format!("\n {}", c)
+ format!("\n {c}")
}
})
.collect();
@@ -2016,7 +2014,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
|| self.tcx.is_automatically_derived(def_id)
})
.filter_map(|def_id| self.tcx.impl_trait_ref(def_id))
- .map(ty::EarlyBinder::subst_identity)
+ .map(ty::EarlyBinder::instantiate_identity)
.filter(|trait_ref| {
let self_ty = trait_ref.self_ty();
// Avoid mentioning type parameters.
@@ -2177,10 +2175,8 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
format!("trait impl{} with same name found", pluralize!(trait_impls.len())),
);
let trait_crate = self.tcx.crate_name(trait_with_same_path.krate);
- let crate_msg = format!(
- "perhaps two different versions of crate `{}` are being used?",
- trait_crate
- );
+ let crate_msg =
+ format!("perhaps two different versions of crate `{trait_crate}` are being used?");
err.note(crate_msg);
suggested = true;
}
@@ -2265,7 +2261,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Pick the first substitution that still contains inference variables as the one
// we're going to emit an error for. If there are none (see above), fall back to
// a more general error.
- let subst = data.trait_ref.substs.iter().find(|s| s.has_non_region_infer());
+ let subst = data.trait_ref.args.iter().find(|s| s.has_non_region_infer());
let mut err = if let Some(subst) = subst {
self.emit_inference_failure_err(
@@ -2290,7 +2286,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
&obligation.with(self.tcx, trait_ref),
);
let has_non_region_infer =
- trait_ref.skip_binder().substs.types().any(|t| !t.is_ty_or_numeric_infer());
+ trait_ref.skip_binder().args.types().any(|t| !t.is_ty_or_numeric_infer());
// It doesn't make sense to talk about applicable impls if there are more
// than a handful of them.
if ambiguities.len() > 1 && ambiguities.len() < 10 && has_non_region_infer {
@@ -2308,7 +2304,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err.cancel();
return;
}
- err.note(format!("cannot satisfy `{}`", predicate));
+ err.note(format!("cannot satisfy `{predicate}`"));
let impl_candidates = self
.find_similar_impl_candidates(predicate.to_opt_poly_trait_pred().unwrap());
if impl_candidates.len() < 10 {
@@ -2328,7 +2324,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
self.suggest_fully_qualified_path(&mut err, def_id, span, trait_ref.def_id());
}
- if let Some(ty::subst::GenericArgKind::Type(_)) = subst.map(|subst| subst.unpack())
+ if let Some(ty::GenericArgKind::Type(_)) = subst.map(|subst| subst.unpack())
&& let Some(body_id) = self.tcx.hir().maybe_body_owned_by(obligation.cause.body_id)
{
let mut expr_finder = FindExprBySpan::new(span);
@@ -2372,7 +2368,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
if let Some(local_def_id) = data.trait_ref.def_id.as_local()
&& let Some(hir::Node::Item(hir::Item { ident: trait_name, kind: hir::ItemKind::Trait(_, _, _, _, trait_item_refs), .. })) = self.tcx.hir().find_by_def_id(local_def_id)
&& let Some(method_ref) = trait_item_refs.iter().find(|item_ref| item_ref.ident == *assoc_item_name) {
- err.span_label(method_ref.span, format!("`{}::{}` defined here", trait_name, assoc_item_name));
+ err.span_label(method_ref.span, format!("`{trait_name}::{assoc_item_name}` defined here"));
}
err.span_label(span, format!("cannot {verb} associated {noun} of trait"));
@@ -2386,14 +2382,11 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// If there is only one implementation of the trait, suggest using it.
// Otherwise, use a placeholder comment for the implementation.
let (message, impl_suggestion) = if non_blanket_impl_count == 1 {(
- "use the fully-qualified path to the only available implementation".to_string(),
- format!("<{} as ", self.tcx.type_of(impl_def_id).subst_identity())
- )} else {(
- format!(
- "use a fully-qualified path to a specific available implementation ({} found)",
- non_blanket_impl_count
- ),
- "</* self type */ as ".to_string()
+ "use the fully-qualified path to the only available implementation",
+ format!("<{} as ", self.tcx.type_of(impl_def_id).instantiate_identity())
+ )} else {
+ ("use a fully-qualified path to a specific available implementation",
+ "</* self type */ as ".to_string()
)};
let mut suggestions = vec![(
path.span.shrink_to_lo(),
@@ -2464,7 +2457,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
let subst = data
.projection_ty
- .substs
+ .args
.iter()
.chain(Some(data.term.into_arg()))
.find(|g| g.has_non_region_infer());
@@ -2476,7 +2469,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
ErrorCode::E0284,
true,
);
- err.note(format!("cannot satisfy `{}`", predicate));
+ err.note(format!("cannot satisfy `{predicate}`"));
err
} else {
// If we can't find a substitution, just print a generic error
@@ -2487,7 +2480,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
"type annotations needed: cannot satisfy `{}`",
predicate,
);
- err.span_label(span, format!("cannot satisfy `{}`", predicate));
+ err.span_label(span, format!("cannot satisfy `{predicate}`"));
err
}
}
@@ -2515,7 +2508,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
"type annotations needed: cannot satisfy `{}`",
predicate,
);
- err.span_label(span, format!("cannot satisfy `{}`", predicate));
+ err.span_label(span, format!("cannot satisfy `{predicate}`"));
err
}
}
@@ -2530,7 +2523,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
"type annotations needed: cannot satisfy `{}`",
predicate,
);
- err.span_label(span, format!("cannot satisfy `{}`", predicate));
+ err.span_label(span, format!("cannot satisfy `{predicate}`"));
err
}
};
@@ -2567,7 +2560,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
}
}
- let mut crate_names: Vec<_> = crates.iter().map(|n| format!("`{}`", n)).collect();
+ let mut crate_names: Vec<_> = crates.iter().map(|n| format!("`{n}`")).collect();
crate_names.sort();
crate_names.dedup();
post.sort();
@@ -2594,7 +2587,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
predicate
);
let post = if post.len() > 1 || (post.len() == 1 && post[0].contains('\n')) {
- format!(":\n{}", post.iter().map(|p| format!("- {}", p)).collect::<Vec<_>>().join("\n"),)
+ format!(":\n{}", post.iter().map(|p| format!("- {p}")).collect::<Vec<_>>().join("\n"),)
} else if post.len() == 1 {
format!(": `{}`", post[0])
} else {
@@ -2603,7 +2596,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
match (spans.len(), crates.len(), crate_names.len()) {
(0, 0, 0) => {
- err.note(format!("cannot satisfy `{}`", predicate));
+ err.note(format!("cannot satisfy `{predicate}`"));
}
(0, _, 1) => {
err.note(format!("{} in the `{}` crate{}", msg, crates[0], post,));
@@ -2706,10 +2699,17 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err: &mut Diagnostic,
obligation: &PredicateObligation<'tcx>,
) {
- let ty::PredicateKind::Clause(ty::ClauseKind::Trait(pred)) = obligation.predicate.kind().skip_binder() else { return; };
+ let ty::PredicateKind::Clause(ty::ClauseKind::Trait(pred)) =
+ obligation.predicate.kind().skip_binder()
+ else {
+ return;
+ };
let (ObligationCauseCode::BindingObligation(item_def_id, span)
- | ObligationCauseCode::ExprBindingObligation(item_def_id, span, ..))
- = *obligation.cause.code().peel_derives() else { return; };
+ | ObligationCauseCode::ExprBindingObligation(item_def_id, span, ..)) =
+ *obligation.cause.code().peel_derives()
+ else {
+ return;
+ };
debug!(?pred, ?item_def_id, ?span);
let (Some(node), true) = (
@@ -2767,7 +2767,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err.span_suggestion_verbose(
span,
"consider relaxing the implicit `Sized` restriction",
- format!("{} ?Sized", separator),
+ format!("{separator} ?Sized"),
Applicability::MachineApplicable,
);
}
@@ -2820,9 +2820,9 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
if obligated_types.iter().any(|ot| ot == &self_ty) {
return true;
}
- if let ty::Adt(def, substs) = self_ty.kind()
- && let [arg] = &substs[..]
- && let ty::subst::GenericArgKind::Type(ty) = arg.unpack()
+ if let ty::Adt(def, args) = self_ty.kind()
+ && let [arg] = &args[..]
+ && let ty::GenericArgKind::Type(ty) = arg.unpack()
&& let ty::Adt(inner_def, _) = ty.kind()
&& inner_def == def
{
@@ -2858,7 +2858,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
})
.unwrap_or_else(|| {
- format!("the trait bound `{}` is not satisfied{}", trait_predicate, post_message)
+ format!("the trait bound `{trait_predicate}` is not satisfied{post_message}")
})
}
@@ -2874,14 +2874,20 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let trait_ref = self.tcx.erase_regions(self.tcx.erase_late_bound_regions(trait_ref));
let src_and_dst = rustc_transmute::Types {
- dst: trait_ref.substs.type_at(0),
- src: trait_ref.substs.type_at(1),
+ dst: trait_ref.args.type_at(0),
+ src: trait_ref.args.type_at(1),
+ };
+ let scope = trait_ref.args.type_at(2);
+ let Some(assume) = rustc_transmute::Assume::from_const(
+ self.infcx.tcx,
+ obligation.param_env,
+ trait_ref.args.const_at(3),
+ ) else {
+ span_bug!(
+ span,
+ "Unable to construct rustc_transmute::Assume where it was previously possible"
+ );
};
- let scope = trait_ref.substs.type_at(2);
- let Some(assume) =
- rustc_transmute::Assume::from_const(self.infcx.tcx, obligation.param_env, trait_ref.substs.const_at(3)) else {
- span_bug!(span, "Unable to construct rustc_transmute::Assume where it was previously possible");
- };
match rustc_transmute::TransmuteTypeEnv::new(self.infcx).is_transmutable(
obligation.cause,
@@ -2890,8 +2896,8 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
assume,
) {
Answer::No(reason) => {
- let dst = trait_ref.substs.type_at(0);
- let src = trait_ref.substs.type_at(1);
+ let dst = trait_ref.args.type_at(0);
+ let src = trait_ref.args.type_at(1);
let err_msg = format!(
"`{src}` cannot be safely transmuted into `{dst}` in the defining scope of `{scope}`"
);
@@ -2982,12 +2988,19 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
unsatisfied_const: bool,
) {
let body_def_id = obligation.cause.body_id;
+ let span = if let ObligationCauseCode::BinOp { rhs_span: Some(rhs_span), .. } =
+ obligation.cause.code()
+ {
+ *rhs_span
+ } else {
+ span
+ };
+
// Try to report a help message
if is_fn_trait
&& let Ok((implemented_kind, params)) = self.type_implements_fn_trait(
obligation.param_env,
trait_ref.self_ty(),
- trait_predicate.skip_binder().constness,
trait_predicate.skip_binder().polarity,
)
{
@@ -3021,8 +3034,9 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
self.report_similar_impl_candidates_for_root_obligation(&obligation, *trait_predicate, body_def_id, err);
}
- self.maybe_suggest_convert_to_slice(
+ self.suggest_convert_to_slice(
err,
+ obligation,
trait_ref,
impl_candidates.as_slice(),
span,
@@ -3058,7 +3072,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Note any argument mismatches
let given_ty = params.skip_binder();
- let expected_ty = trait_ref.skip_binder().substs.type_at(1);
+ let expected_ty = trait_ref.skip_binder().args.type_at(1);
if let ty::Tuple(given) = given_ty.kind()
&& let ty::Tuple(expected) = expected_ty.kind()
{
@@ -3089,34 +3103,14 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
fn maybe_add_note_for_unsatisfied_const(
&self,
- obligation: &PredicateObligation<'tcx>,
- trait_ref: ty::PolyTraitRef<'tcx>,
- trait_predicate: &ty::PolyTraitPredicate<'tcx>,
- err: &mut Diagnostic,
- span: Span,
+ _obligation: &PredicateObligation<'tcx>,
+ _trait_ref: ty::PolyTraitRef<'tcx>,
+ _trait_predicate: &ty::PolyTraitPredicate<'tcx>,
+ _err: &mut Diagnostic,
+ _span: Span,
) -> UnsatisfiedConst {
- let mut unsatisfied_const = UnsatisfiedConst(false);
- if trait_predicate.is_const_if_const() && obligation.param_env.is_const() {
- let non_const_predicate = trait_ref.without_const();
- let non_const_obligation = Obligation {
- cause: obligation.cause.clone(),
- param_env: obligation.param_env.without_const(),
- predicate: non_const_predicate.to_predicate(self.tcx),
- recursion_depth: obligation.recursion_depth,
- };
- if self.predicate_may_hold(&non_const_obligation) {
- unsatisfied_const = UnsatisfiedConst(true);
- err.span_note(
- span,
- format!(
- "the trait `{}` is implemented for `{}`, \
- but that implementation is not `const`",
- non_const_predicate.print_modifiers_and_trait_path(),
- trait_ref.skip_binder().self_ty(),
- ),
- );
- }
- }
+ let unsatisfied_const = UnsatisfiedConst(false);
+ // FIXME(effects)
unsatisfied_const
}
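// Illustrative sketch, not part of this patch: the next hunk replaces a
// hand-rolled `struct_span_err!` with a typed diagnostic struct that is later
// emitted through `self.tcx.sess.create_err(err)`. A rough shape of such a
// struct is sketched below; the derive, the field attributes, and the fluent
// slug names are assumptions for illustration and are not shown in this diff
// (the real definition lives in `rustc_trait_selection::errors`).
//
//     #[derive(Diagnostic)]
//     #[diag(trait_selection_closure_kind_mismatch, code = "E0525")]
//     pub struct ClosureKindMismatch {
//         #[primary_span]
//         #[label]
//         pub closure_span: Span,
//         pub expected: ty::ClosureKind,
//         pub found: ty::ClosureKind,
//         #[label(trait_selection_closure_kind_requirement)]
//         pub cause_span: Span,
//         #[subdiagnostic]
//         pub fn_once_label: Option<ClosureFnOnceLabel>,
//         #[subdiagnostic]
//         pub fn_mut_label: Option<ClosureFnMutLabel>,
//     }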
@@ -3128,24 +3122,15 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
kind: ty::ClosureKind,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let closure_span = self.tcx.def_span(closure_def_id);
- let mut err = struct_span_err!(
- self.tcx.sess,
- closure_span,
- E0525,
- "expected a closure that implements the `{}` trait, \
- but this closure only implements `{}`",
- kind,
- found_kind
- );
- err.span_label(
+ let mut err = ClosureKindMismatch {
closure_span,
- format!("this closure implements `{}`, not `{}`", found_kind, kind),
- );
- err.span_label(
- obligation.cause.span,
- format!("the requirement to implement `{}` derives from here", kind),
- );
+ expected: kind,
+ found: found_kind,
+ cause_span: obligation.cause.span,
+ fn_once_label: None,
+ fn_mut_label: None,
+ };
// Additional context information explaining why the closure only implements
// a particular trait.
@@ -3153,30 +3138,22 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let hir_id = self.tcx.hir().local_def_id_to_hir_id(closure_def_id.expect_local());
match (found_kind, typeck_results.closure_kind_origins().get(hir_id)) {
(ty::ClosureKind::FnOnce, Some((span, place))) => {
- err.span_label(
- *span,
- format!(
- "closure is `FnOnce` because it moves the \
- variable `{}` out of its environment",
- ty::place_to_string_for_capture(self.tcx, place)
- ),
- );
+ err.fn_once_label = Some(ClosureFnOnceLabel {
+ span: *span,
+ place: ty::place_to_string_for_capture(self.tcx, &place),
+ })
}
(ty::ClosureKind::FnMut, Some((span, place))) => {
- err.span_label(
- *span,
- format!(
- "closure is `FnMut` because it mutates the \
- variable `{}` here",
- ty::place_to_string_for_capture(self.tcx, place)
- ),
- );
+ err.fn_mut_label = Some(ClosureFnMutLabel {
+ span: *span,
+ place: ty::place_to_string_for_capture(self.tcx, &place),
+ })
}
_ => {}
}
}
- err
+ self.tcx.sess.create_err(err)
}
fn report_type_parameter_mismatch_cyclic_type_error(
@@ -3273,7 +3250,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let mut not_tupled = false;
- let found = match found_trait_ref.skip_binder().substs.type_at(1).kind() {
+ let found = match found_trait_ref.skip_binder().args.type_at(1).kind() {
ty::Tuple(ref tys) => vec![ArgKind::empty(); tys.len()],
_ => {
not_tupled = true;
@@ -3281,7 +3258,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
};
- let expected_ty = expected_trait_ref.skip_binder().substs.type_at(1);
+ let expected_ty = expected_trait_ref.skip_binder().args.type_at(1);
let expected = match expected_ty.kind() {
ty::Tuple(ref tys) => {
tys.iter().map(|t| ArgKind::from_expected_ty(t, Some(span))).collect()
@@ -3371,8 +3348,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let const_span = self.tcx.def_span(uv.def);
match self.tcx.sess.source_map().span_to_snippet(const_span) {
Ok(snippet) => err.help(format!(
- "try adding a `where` bound using this expression: `where [(); {}]:`",
- snippet
+ "try adding a `where` bound using this expression: `where [(); {snippet}]:`"
)),
_ => err.help("consider adding a `where` bound using this expression"),
};
@@ -3554,7 +3530,7 @@ pub fn dump_proof_tree<'tcx>(o: &Obligation<'tcx, ty::Predicate<'tcx>>, infcx: &
.1
.expect("proof tree should have been generated");
let mut lock = std::io::stdout().lock();
- let _ = lock.write_fmt(format_args!("{tree:?}"));
+ let _ = lock.write_fmt(format_args!("{tree:?}\n"));
let _ = lock.flush();
});
}
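// Illustrative sketch, not part of this patch: the dominant mechanical change
// in the file above is the rename of the generic-substitution vocabulary:
// `SubstsRef` -> `GenericArgsRef`, `.substs` -> `.args`, and
// `EarlyBinder::subst_identity` -> `EarlyBinder::instantiate_identity`. The
// helper name `impl_self_ty_sketch` below is hypothetical; the calls it makes
// all appear in the diff itself.
use rustc_hir::def_id::DefId;
use rustc_middle::ty::{Ty, TyCtxt};

fn impl_self_ty_sketch<'tcx>(tcx: TyCtxt<'tcx>, impl_def_id: DefId) -> Ty<'tcx> {
    // Before this change: tcx.impl_trait_ref(impl_def_id).unwrap().subst_identity().self_ty()
    tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity().self_ty()
}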
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
index b16d2eb5f..0e73bad19 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
@@ -6,7 +6,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{struct_span_err, ErrorGuaranteed};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
-use rustc_middle::ty::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, GenericParamDefKind, TyCtxt};
use rustc_parse_format::{ParseMode, Parser, Piece, Position};
use rustc_span::symbol::{kw, sym, Symbol};
@@ -25,7 +25,7 @@ pub trait TypeErrCtxtExt<'tcx> {
&self,
trait_ref: ty::PolyTraitRef<'tcx>,
obligation: &PredicateObligation<'tcx>,
- ) -> Option<(DefId, SubstsRef<'tcx>)>;
+ ) -> Option<(DefId, GenericArgsRef<'tcx>)>;
/*private*/
fn describe_enclosure(&self, hir_id: hir::HirId) -> Option<&'static str>;
@@ -56,7 +56,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
&self,
trait_ref: ty::PolyTraitRef<'tcx>,
obligation: &PredicateObligation<'tcx>,
- ) -> Option<(DefId, SubstsRef<'tcx>)> {
+ ) -> Option<(DefId, GenericArgsRef<'tcx>)> {
let tcx = self.tcx;
let param_env = obligation.param_env;
let trait_ref = self.instantiate_binder_with_placeholders(trait_ref);
@@ -66,26 +66,23 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let mut fuzzy_match_impls = vec![];
self.tcx.for_each_relevant_impl(trait_ref.def_id, trait_self_ty, |def_id| {
- let impl_substs = self.fresh_substs_for_item(obligation.cause.span, def_id);
- let impl_trait_ref = tcx.impl_trait_ref(def_id).unwrap().subst(tcx, impl_substs);
+ let impl_args = self.fresh_args_for_item(obligation.cause.span, def_id);
+ let impl_trait_ref = tcx.impl_trait_ref(def_id).unwrap().instantiate(tcx, impl_args);
let impl_self_ty = impl_trait_ref.self_ty();
if self.can_eq(param_env, trait_self_ty, impl_self_ty) {
- self_match_impls.push((def_id, impl_substs));
+ self_match_impls.push((def_id, impl_args));
- if iter::zip(
- trait_ref.substs.types().skip(1),
- impl_trait_ref.substs.types().skip(1),
- )
- .all(|(u, v)| self.fuzzy_match_tys(u, v, false).is_some())
+ if iter::zip(trait_ref.args.types().skip(1), impl_trait_ref.args.types().skip(1))
+ .all(|(u, v)| self.fuzzy_match_tys(u, v, false).is_some())
{
- fuzzy_match_impls.push((def_id, impl_substs));
+ fuzzy_match_impls.push((def_id, impl_args));
}
}
});
- let impl_def_id_and_substs = if self_match_impls.len() == 1 {
+ let impl_def_id_and_args = if self_match_impls.len() == 1 {
self_match_impls[0]
} else if fuzzy_match_impls.len() == 1 {
fuzzy_match_impls[0]
@@ -93,8 +90,8 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
return None;
};
- tcx.has_attr(impl_def_id_and_substs.0, sym::rustc_on_unimplemented)
- .then_some(impl_def_id_and_substs)
+ tcx.has_attr(impl_def_id_and_args.0, sym::rustc_on_unimplemented)
+ .then_some(impl_def_id_and_args)
}
/// Used to set on_unimplemented's `ItemContext`
@@ -143,9 +140,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
trait_ref: ty::PolyTraitRef<'tcx>,
obligation: &PredicateObligation<'tcx>,
) -> OnUnimplementedNote {
- let (def_id, substs) = self
+ let (def_id, args) = self
.impl_similar_to(trait_ref, obligation)
- .unwrap_or_else(|| (trait_ref.def_id(), trait_ref.skip_binder().substs));
+ .unwrap_or_else(|| (trait_ref.def_id(), trait_ref.skip_binder().args));
let trait_ref = trait_ref.skip_binder();
let mut flags = vec![];
@@ -173,7 +170,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
if let Some(k) = obligation.cause.span.desugaring_kind() {
flags.push((sym::from_desugaring, None));
- flags.push((sym::from_desugaring, Some(format!("{:?}", k))));
+ flags.push((sym::from_desugaring, Some(format!("{k:?}"))));
}
if let ObligationCauseCode::MainFunctionType = obligation.cause.code() {
@@ -192,14 +189,14 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// signature with no type arguments resolved
flags.push((
sym::_Self,
- Some(self.tcx.type_of(def.did()).subst_identity().to_string()),
+ Some(self.tcx.type_of(def.did()).instantiate_identity().to_string()),
));
}
for param in generics.params.iter() {
let value = match param.kind {
GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
- substs[param.index as usize].to_string()
+ args[param.index as usize].to_string()
}
GenericParamDefKind::Lifetime => continue,
};
@@ -207,13 +204,13 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
flags.push((name, Some(value)));
if let GenericParamDefKind::Type { .. } = param.kind {
- let param_ty = substs[param.index as usize].expect_ty();
+ let param_ty = args[param.index as usize].expect_ty();
if let Some(def) = param_ty.ty_adt_def() {
// We also want to be able to select the parameter's
// original signature with no type arguments resolved
flags.push((
name,
- Some(self.tcx.type_of(def.did()).subst_identity().to_string()),
+ Some(self.tcx.type_of(def.did()).instantiate_identity().to_string()),
));
}
}
@@ -249,7 +246,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// signature with no type arguments resolved
flags.push((
sym::_Self,
- Some(format!("[{}]", self.tcx.type_of(def.did()).subst_identity())),
+ Some(format!("[{}]", self.tcx.type_of(def.did()).instantiate_identity())),
));
}
if aty.is_integral() {
@@ -261,14 +258,14 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
if let ty::Array(aty, len) = self_ty.kind() {
flags.push((sym::_Self, Some("[]".to_string())));
let len = len.try_to_value().and_then(|v| v.try_to_target_usize(self.tcx));
- flags.push((sym::_Self, Some(format!("[{}; _]", aty))));
+ flags.push((sym::_Self, Some(format!("[{aty}; _]"))));
if let Some(n) = len {
- flags.push((sym::_Self, Some(format!("[{}; {}]", aty, n))));
+ flags.push((sym::_Self, Some(format!("[{aty}; {n}]"))));
}
if let Some(def) = aty.ty_adt_def() {
// We also want to be able to select the array's type's original
// signature with no type arguments resolved
- let def_ty = self.tcx.type_of(def.did()).subst_identity();
+ let def_ty = self.tcx.type_of(def.did()).instantiate_identity();
flags.push((sym::_Self, Some(format!("[{def_ty}; _]"))));
if let Some(n) = len {
flags.push((sym::_Self, Some(format!("[{def_ty}; {n}]"))));
@@ -332,18 +329,13 @@ pub struct OnUnimplementedNote {
}
/// Append a message for `~const Trait` errors.
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Default)]
pub enum AppendConstMessage {
+ #[default]
Default,
Custom(Symbol),
}
-impl Default for AppendConstMessage {
- fn default() -> Self {
- AppendConstMessage::Default
- }
-}
-
impl<'tcx> OnUnimplementedDirective {
fn parse(
tcx: TyCtxt<'tcx>,
@@ -587,7 +579,7 @@ impl<'tcx> OnUnimplementedFormatString {
"there is no parameter `{}` on {}",
s,
if trait_def_id == item_def_id {
- format!("trait `{}`", trait_name)
+ format!("trait `{trait_name}`")
} else {
"impl".to_string()
}
@@ -629,7 +621,7 @@ impl<'tcx> OnUnimplementedFormatString {
.filter_map(|param| {
let value = match param.kind {
GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
- trait_ref.substs[param.index as usize].to_string()
+ trait_ref.args[param.index as usize].to_string()
}
GenericParamDefKind::Lifetime => return None,
};
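// Illustrative sketch, not part of this patch: `AppendConstMessage` above now
// uses `#[derive(Default)]` on an enum (stable since Rust 1.62), where the
// `#[default]` attribute marks the variant that `Default::default()` returns,
// replacing the manual `impl Default`. The same pattern on a hypothetical enum:

#[derive(Clone, Copy, PartialEq, Eq, Debug, Default)]
enum SketchMessage {
    #[default]
    Plain,
    Custom(&'static str),
}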
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
index 9ac1ba027..611ec6b00 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
@@ -5,6 +5,7 @@ use super::{
PredicateObligation,
};
+use crate::errors;
use crate::infer::InferCtxt;
use crate::traits::{NormalizeExt, ObligationCtxt};
@@ -30,7 +31,7 @@ use rustc_middle::hir::map;
use rustc_middle::ty::error::TypeError::{self, Sorts};
use rustc_middle::ty::{
self, suggest_arbitrary_trait_bound, suggest_constraining_type_param, AdtKind,
- GeneratorDiagnosticData, GeneratorInteriorTypeCause, InferTy, InternalSubsts, IsSuggestable,
+ GeneratorDiagnosticData, GeneratorInteriorTypeCause, GenericArgs, InferTy, IsSuggestable,
ToPredicate, Ty, TyCtxt, TypeAndMut, TypeFoldable, TypeFolder, TypeSuperFoldable,
TypeVisitableExt, TypeckResults,
};
@@ -40,7 +41,6 @@ use rustc_span::{BytePos, DesugaringKind, ExpnKind, MacroKind, Span, DUMMY_SP};
use rustc_target::spec::abi;
use std::borrow::Cow;
use std::iter;
-use std::ops::Deref;
use super::InferCtxtPrivExt;
use crate::infer::InferCtxtExt as _;
@@ -398,9 +398,10 @@ pub trait TypeErrCtxtExt<'tcx> {
param_env: ty::ParamEnv<'tcx>,
) -> Vec<Option<(Span, (DefId, Ty<'tcx>))>>;
- fn maybe_suggest_convert_to_slice(
+ fn suggest_convert_to_slice(
&self,
err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
candidate_impls: &[ImplCandidate<'tcx>],
span: Span,
@@ -435,7 +436,7 @@ fn suggest_restriction<'tcx>(
) {
if hir_generics.where_clause_span.from_expansion()
|| hir_generics.where_clause_span.desugaring_kind().is_some()
- || projection.is_some_and(|projection| tcx.opt_rpitit_info(projection.def_id).is_some())
+ || projection.is_some_and(|projection| tcx.is_impl_trait_in_trait(projection.def_id))
{
return;
}
@@ -479,13 +480,13 @@ fn suggest_restriction<'tcx>(
.visit_ty(input);
}
// The type param `T: Trait` we will suggest to introduce.
- let type_param = format!("{}: {}", type_param_name, bound_str);
+ let type_param = format!("{type_param_name}: {bound_str}");
let mut sugg = vec![
if let Some(span) = hir_generics.span_for_param_suggestion() {
- (span, format!(", {}", type_param))
+ (span, format!(", {type_param}"))
} else {
- (hir_generics.span, format!("<{}>", type_param))
+ (hir_generics.span, format!("<{type_param}>"))
},
// `fn foo(t: impl Trait)`
// ^ suggest `where <T as Trait>::A: Bound`
@@ -530,7 +531,7 @@ fn suggest_restriction<'tcx>(
err.span_suggestion_verbose(
sp,
- format!("consider further restricting {}", msg),
+ format!("consider further restricting {msg}"),
suggestion,
Applicability::MachineApplicable,
);
@@ -654,6 +655,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
| hir::ItemKind::Impl(hir::Impl { generics, .. })
| hir::ItemKind::Fn(_, generics, _)
| hir::ItemKind::TyAlias(_, generics)
+ | hir::ItemKind::Const(_, generics, _)
| hir::ItemKind::TraitAlias(generics, _)
| hir::ItemKind::OpaqueTy(hir::OpaqueTy { generics, .. }),
..
@@ -670,7 +672,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// this that we do in `suggest_restriction` and pull the
// `impl Trait` into a new generic if it shows up somewhere
// else in the predicate.
- if !trait_pred.skip_binder().trait_ref.substs[1..]
+ if !trait_pred.skip_binder().trait_ref.args[1..]
.iter()
.all(|g| g.is_suggestable(self.tcx, false))
{
@@ -693,7 +695,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
term
);
} else {
- constraint.push_str(&format!("<{} = {}>", name, term));
+ constraint.push_str(&format!("<{name} = {term}>"));
}
}
@@ -719,6 +721,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
| hir::ItemKind::Impl(hir::Impl { generics, .. })
| hir::ItemKind::Fn(_, generics, _)
| hir::ItemKind::TyAlias(_, generics)
+ | hir::ItemKind::Const(_, generics, _)
| hir::ItemKind::TraitAlias(generics, _)
| hir::ItemKind::OpaqueTy(hir::OpaqueTy { generics, .. }),
..
@@ -752,14 +755,20 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
trait_pred: ty::PolyTraitPredicate<'tcx>,
) -> bool {
// It only makes sense when suggesting dereferences for arguments
- let ObligationCauseCode::FunctionArgumentObligation { arg_hir_id, call_hir_id, .. } = obligation.cause.code()
- else { return false; };
- let Some(typeck_results) = &self.typeck_results
- else { return false; };
- let hir::Node::Expr(expr) = self.tcx.hir().get(*arg_hir_id)
- else { return false; };
- let Some(arg_ty) = typeck_results.expr_ty_adjusted_opt(expr)
- else { return false; };
+ let ObligationCauseCode::FunctionArgumentObligation { arg_hir_id, call_hir_id, .. } =
+ obligation.cause.code()
+ else {
+ return false;
+ };
+ let Some(typeck_results) = &self.typeck_results else {
+ return false;
+ };
+ let hir::Node::Expr(expr) = self.tcx.hir().get(*arg_hir_id) else {
+ return false;
+ };
+ let Some(arg_ty) = typeck_results.expr_ty_adjusted_opt(expr) else {
+ return false;
+ };
let span = obligation.cause.span;
let mut real_trait_pred = trait_pred;
@@ -770,18 +779,14 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
real_trait_pred = parent_trait_pred;
}
- let real_ty = real_trait_pred.self_ty();
// We `erase_late_bound_regions` here because `make_subregion` does not handle
// `ReLateBound`, and we don't particularly care about the regions.
- if !self.can_eq(
- obligation.param_env,
- self.tcx.erase_late_bound_regions(real_ty),
- arg_ty,
- ) {
+ let real_ty = self.tcx.erase_late_bound_regions(real_trait_pred.self_ty());
+ if !self.can_eq(obligation.param_env, real_ty, arg_ty) {
continue;
}
- if let ty::Ref(region, base_ty, mutbl) = *real_ty.skip_binder().kind() {
+ if let ty::Ref(region, base_ty, mutbl) = *real_ty.kind() {
let autoderef = (self.autoderef_steps)(base_ty);
if let Some(steps) =
autoderef.into_iter().enumerate().find_map(|(steps, (ty, obligations))| {
@@ -933,11 +938,11 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
trait_pred.self_ty(),
);
- let Some((def_id_or_name, output, inputs)) = self.extract_callable_info(
- obligation.cause.body_id,
- obligation.param_env,
- self_ty,
- ) else { return false; };
+ let Some((def_id_or_name, output, inputs)) =
+ self.extract_callable_info(obligation.cause.body_id, obligation.param_env, self_ty)
+ else {
+ return false;
+ };
// Remapping bound vars here
let trait_pred_and_self = trait_pred.map_bound(|trait_pred| (trait_pred, output));
@@ -1012,7 +1017,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let name = self.tcx.def_path_str(def_id);
err.span_label(
self.tcx.def_span(def_id),
- format!("consider calling the constructor for `{}`", name),
+ format!("consider calling the constructor for `{name}`"),
);
name
}
@@ -1035,26 +1040,40 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
span.remove_mark();
}
let mut expr_finder = FindExprBySpan::new(span);
- let Some(body_id) = self.tcx.hir().maybe_body_owned_by(obligation.cause.body_id) else { return; };
+ let Some(body_id) = self.tcx.hir().maybe_body_owned_by(obligation.cause.body_id) else {
+ return;
+ };
let body = self.tcx.hir().body(body_id);
expr_finder.visit_expr(body.value);
- let Some(expr) = expr_finder.result else { return; };
- let Some(typeck) = &self.typeck_results else { return; };
- let Some(ty) = typeck.expr_ty_adjusted_opt(expr) else { return; };
+ let Some(expr) = expr_finder.result else {
+ return;
+ };
+ let Some(typeck) = &self.typeck_results else {
+ return;
+ };
+ let Some(ty) = typeck.expr_ty_adjusted_opt(expr) else {
+ return;
+ };
if !ty.is_unit() {
return;
};
- let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = expr.kind else { return; };
- let hir::def::Res::Local(hir_id) = path.res else { return; };
+ let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = expr.kind else {
+ return;
+ };
+ let hir::def::Res::Local(hir_id) = path.res else {
+ return;
+ };
let Some(hir::Node::Pat(pat)) = self.tcx.hir().find(hir_id) else {
return;
};
- let Some(hir::Node::Local(hir::Local {
- ty: None,
- init: Some(init),
- ..
- })) = self.tcx.hir().find_parent(pat.hir_id) else { return; };
- let hir::ExprKind::Block(block, None) = init.kind else { return; };
+ let Some(hir::Node::Local(hir::Local { ty: None, init: Some(init), .. })) =
+ self.tcx.hir().find_parent(pat.hir_id)
+ else {
+ return;
+ };
+ let hir::ExprKind::Block(block, None) = init.kind else {
+ return;
+ };
if block.expr.is_some() {
return;
}
@@ -1062,7 +1081,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err.span_label(block.span, "this empty block is missing a tail expression");
return;
};
- let hir::StmtKind::Semi(tail_expr) = stmt.kind else { return; };
+ let hir::StmtKind::Semi(tail_expr) = stmt.kind else {
+ return;
+ };
let Some(ty) = typeck.expr_ty_opt(tail_expr) else {
err.span_label(block.span, "this block is missing a tail expression");
return;
@@ -1092,12 +1113,18 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
) -> bool {
let self_ty = self.resolve_vars_if_possible(trait_pred.self_ty());
let ty = self.instantiate_binder_with_placeholders(self_ty);
- let Some(generics) = self.tcx.hir().get_generics(obligation.cause.body_id) else { return false };
+ let Some(generics) = self.tcx.hir().get_generics(obligation.cause.body_id) else {
+ return false;
+ };
let ty::Ref(_, inner_ty, hir::Mutability::Not) = ty.kind() else { return false };
let ty::Param(param) = inner_ty.kind() else { return false };
- let ObligationCauseCode::FunctionArgumentObligation { arg_hir_id, .. } = obligation.cause.code() else { return false };
+ let ObligationCauseCode::FunctionArgumentObligation { arg_hir_id, .. } =
+ obligation.cause.code()
+ else {
+ return false;
+ };
let arg_node = self.tcx.hir().get(*arg_hir_id);
- let Node::Expr(Expr { kind: hir::ExprKind::Path(_), ..}) = arg_node else { return false };
+ let Node::Expr(Expr { kind: hir::ExprKind::Path(_), .. }) = arg_node else { return false };
let clone_trait = self.tcx.require_lang_item(LangItem::Clone, None);
let has_clone = |ty| {
@@ -1143,24 +1170,33 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
found: Ty<'tcx>,
) -> Option<(DefIdOrName, Ty<'tcx>, Vec<Ty<'tcx>>)> {
// Autoderef is useful here because sometimes we box callables, etc.
- let Some((def_id_or_name, output, inputs)) = (self.autoderef_steps)(found).into_iter().find_map(|(found, _)| {
- match *found.kind() {
- ty::FnPtr(fn_sig) =>
- Some((DefIdOrName::Name("function pointer"), fn_sig.output(), fn_sig.inputs())),
- ty::FnDef(def_id, _) => {
- let fn_sig = found.fn_sig(self.tcx);
- Some((DefIdOrName::DefId(def_id), fn_sig.output(), fn_sig.inputs()))
- }
- ty::Closure(def_id, substs) => {
- let fn_sig = substs.as_closure().sig();
- Some((DefIdOrName::DefId(def_id), fn_sig.output(), fn_sig.inputs().map_bound(|inputs| &inputs[1..])))
- }
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
- self.tcx.item_bounds(def_id).subst(self.tcx, substs).iter().find_map(|pred| {
- if let ty::ClauseKind::Projection(proj) = pred.kind().skip_binder()
+ let Some((def_id_or_name, output, inputs)) =
+ (self.autoderef_steps)(found).into_iter().find_map(|(found, _)| {
+ match *found.kind() {
+ ty::FnPtr(fn_sig) => Some((
+ DefIdOrName::Name("function pointer"),
+ fn_sig.output(),
+ fn_sig.inputs(),
+ )),
+ ty::FnDef(def_id, _) => {
+ let fn_sig = found.fn_sig(self.tcx);
+ Some((DefIdOrName::DefId(def_id), fn_sig.output(), fn_sig.inputs()))
+ }
+ ty::Closure(def_id, args) => {
+ let fn_sig = args.as_closure().sig();
+ Some((
+ DefIdOrName::DefId(def_id),
+ fn_sig.output(),
+ fn_sig.inputs().map_bound(|inputs| &inputs[1..]),
+ ))
+ }
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
+ self.tcx.item_bounds(def_id).instantiate(self.tcx, args).iter().find_map(
+ |pred| {
+ if let ty::ClauseKind::Projection(proj) = pred.kind().skip_binder()
&& Some(proj.projection_ty.def_id) == self.tcx.lang_items().fn_once_output()
- // args tuple will always be substs[1]
- && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
+ // args tuple will always be args[1]
+ && let ty::Tuple(args) = proj.projection_ty.args.type_at(1).kind()
{
Some((
DefIdOrName::DefId(def_id),
@@ -1170,14 +1206,15 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
} else {
None
}
- })
- }
- ty::Dynamic(data, _, ty::Dyn) => {
- data.iter().find_map(|pred| {
- if let ty::ExistentialPredicate::Projection(proj) = pred.skip_binder()
+ },
+ )
+ }
+ ty::Dynamic(data, _, ty::Dyn) => {
+ data.iter().find_map(|pred| {
+ if let ty::ExistentialPredicate::Projection(proj) = pred.skip_binder()
&& Some(proj.def_id) == self.tcx.lang_items().fn_once_output()
- // for existential projection, substs are shifted over by 1
- && let ty::Tuple(args) = proj.substs.type_at(0).kind()
+ // for existential projection, args are shifted over by 1
+ && let ty::Tuple(args) = proj.args.type_at(0).kind()
{
Some((
DefIdOrName::Name("trait object"),
@@ -1187,11 +1224,11 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
} else {
None
}
- })
- }
- ty::Param(param) => {
- let generics = self.tcx.generics_of(body_id);
- let name = if generics.count() > param.index as usize
+ })
+ }
+ ty::Param(param) => {
+ let generics = self.tcx.generics_of(body_id);
+ let name = if generics.count() > param.index as usize
&& let def = generics.param_at(param.index as usize, self.tcx)
&& matches!(def.kind, ty::GenericParamDefKind::Type { .. })
&& def.name == param.name
@@ -1200,12 +1237,12 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
} else {
DefIdOrName::Name("type parameter")
};
- param_env.caller_bounds().iter().find_map(|pred| {
- if let ty::ClauseKind::Projection(proj) = pred.kind().skip_binder()
+ param_env.caller_bounds().iter().find_map(|pred| {
+ if let ty::ClauseKind::Projection(proj) = pred.kind().skip_binder()
&& Some(proj.projection_ty.def_id) == self.tcx.lang_items().fn_once_output()
&& proj.projection_ty.self_ty() == found
- // args tuple will always be substs[1]
- && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
+ // args tuple will always be args[1]
+ && let ty::Tuple(args) = proj.projection_ty.args.type_at(1).kind()
{
Some((
name,
@@ -1215,11 +1252,14 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
} else {
None
}
- })
+ })
+ }
+ _ => None,
}
- _ => None,
- }
- }) else { return None; };
+ })
+ else {
+ return None;
+ };
let output = self.instantiate_binder_with_fresh_vars(
DUMMY_SP,
@@ -1356,7 +1396,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Because of this, we modify the error to refer to the original obligation and
// return early in the caller.
- let msg = format!("the trait bound `{}` is not satisfied", old_pred);
+ let msg = format!("the trait bound `{old_pred}` is not satisfied");
if has_custom_message {
err.note(msg);
} else {
@@ -1389,30 +1429,34 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Issue #109436, we need to add parentheses properly for method calls
// for example, `foo.into()` should be `(&foo).into()`
- if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(
- self.tcx.sess.source_map().span_look_ahead(span, Some("."), Some(50)),
- ) {
- if snippet == "." {
- err.multipart_suggestion_verbose(
- sugg_msg,
- vec![
- (span.shrink_to_lo(), format!("({}", sugg_prefix)),
- (span.shrink_to_hi(), ")".to_string()),
- ],
- Applicability::MaybeIncorrect,
- );
- return true;
- }
+ if let Some(_) =
+ self.tcx.sess.source_map().span_look_ahead(span, ".", Some(50))
+ {
+ err.multipart_suggestion_verbose(
+ sugg_msg,
+ vec![
+ (span.shrink_to_lo(), format!("({sugg_prefix}")),
+ (span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ return true;
}
// Issue #104961, we need to add parentheses properly for compound expressions
// for example, `x.starts_with("hi".to_string() + "you")`
// should be `x.starts_with(&("hi".to_string() + "you"))`
- let Some(body_id) = self.tcx.hir().maybe_body_owned_by(obligation.cause.body_id) else { return false; };
+ let Some(body_id) =
+ self.tcx.hir().maybe_body_owned_by(obligation.cause.body_id)
+ else {
+ return false;
+ };
let body = self.tcx.hir().body(body_id);
let mut expr_finder = FindExprBySpan::new(span);
expr_finder.visit_expr(body.value);
- let Some(expr) = expr_finder.result else { return false; };
+ let Some(expr) = expr_finder.result else {
+ return false;
+ };
let needs_parens = match expr.kind {
// parenthesize if needed (Issue #46756)
hir::ExprKind::Cast(_, _) | hir::ExprKind::Binary(_, _, _) => true,
@@ -1423,10 +1467,10 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let span = if needs_parens { span } else { span.shrink_to_lo() };
let suggestions = if !needs_parens {
- vec![(span.shrink_to_lo(), format!("{}", sugg_prefix))]
+ vec![(span.shrink_to_lo(), sugg_prefix)]
} else {
vec![
- (span.shrink_to_lo(), format!("{}(", sugg_prefix)),
+ (span.shrink_to_lo(), format!("{sugg_prefix}(")),
(span.shrink_to_hi(), ")".to_string()),
]
};
@@ -1463,8 +1507,12 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
self_ty: Ty<'tcx>,
target_ty: Ty<'tcx>,
) {
- let ty::Ref(_, object_ty, hir::Mutability::Not) = target_ty.kind() else { return; };
- let ty::Dynamic(predicates, _, ty::Dyn) = object_ty.kind() else { return; };
+ let ty::Ref(_, object_ty, hir::Mutability::Not) = target_ty.kind() else {
+ return;
+ };
+ let ty::Dynamic(predicates, _, ty::Dyn) = object_ty.kind() else {
+ return;
+ };
let self_ref_ty = Ty::new_imm_ref(self.tcx, self.tcx.lifetimes.re_erased, self_ty);
for predicate in predicates.iter() {
@@ -1566,7 +1614,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
// Maybe suggest removal of borrows from expressions, like in `for i in &&&foo {}`.
- let Some(mut expr) = expr_finder.result else { return false; };
+ let Some(mut expr) = expr_finder.result else {
+ return false;
+ };
let mut count = 0;
let mut suggestions = vec![];
// Skipping binder here, remapping below
@@ -1646,13 +1696,12 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
if let Some(typeck_results) = &self.typeck_results
&& let ty = typeck_results.expr_ty_adjusted(base)
- && let ty::FnDef(def_id, _substs) = ty.kind()
+ && let ty::FnDef(def_id, _args) = ty.kind()
&& let Some(hir::Node::Item(hir::Item { ident, span, vis_span, .. })) =
hir.get_if_local(*def_id)
{
let msg = format!(
- "alternatively, consider making `fn {}` asynchronous",
- ident
+ "alternatively, consider making `fn {ident}` asynchronous"
);
if vis_span.is_empty() {
err.span_suggestion_verbose(
@@ -1798,7 +1847,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
fn return_type_span(&self, obligation: &PredicateObligation<'tcx>) -> Option<Span> {
let hir = self.tcx.hir();
- let Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(sig, ..), .. })) = hir.find_by_def_id(obligation.cause.body_id) else {
+ let Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(sig, ..), .. })) =
+ hir.find_by_def_id(obligation.cause.body_id)
+ else {
return None;
};
@@ -1901,10 +1952,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// don't print out the [type error] here
err.delay_as_bug();
} else {
- err.span_label(
- expr.span,
- format!("this returned value is of type `{}`", ty),
- );
+ err.span_label(expr.span, format!("this returned value is of type `{ty}`"));
}
}
}
@@ -1925,7 +1973,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
infcx: &InferCtxt<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> Ty<'tcx> {
- let inputs = trait_ref.skip_binder().substs.type_at(1);
+ let inputs = trait_ref.skip_binder().args.type_at(1);
let sig = match inputs.kind() {
ty::Tuple(inputs) if infcx.tcx.is_fn_trait(trait_ref.def_id()) => {
infcx.tcx.mk_fn_sig(
@@ -2006,12 +2054,12 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
{
let expected_self =
self.tcx.anonymize_bound_vars(pred.kind().rebind(trait_pred.self_ty()));
- let expected_substs = self
+ let expected_args = self
.tcx
- .anonymize_bound_vars(pred.kind().rebind(trait_pred.trait_ref.substs));
+ .anonymize_bound_vars(pred.kind().rebind(trait_pred.trait_ref.args));
// Find another predicate whose self-type is equal to the expected self type,
- // but whose substs don't match.
+ // but whose args don't match.
let other_pred = predicates.into_iter()
.enumerate()
.find(|(other_idx, (pred, _))| match pred.kind().skip_binder() {
@@ -2024,10 +2072,10 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
== self.tcx.anonymize_bound_vars(
pred.kind().rebind(trait_pred.self_ty()),
)
- // But the substs don't match (i.e. incompatible args)
- && expected_substs
+ // But the args don't match (i.e. incompatible args)
+ && expected_args
!= self.tcx.anonymize_bound_vars(
- pred.kind().rebind(trait_pred.trait_ref.substs),
+ pred.kind().rebind(trait_pred.trait_ref.args),
) =>
{
true
@@ -2222,7 +2270,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Only continue if a generator was found.
debug!(?generator, ?trait_ref, ?target_ty);
- let (Some(generator_did), Some(trait_ref), Some(target_ty)) = (generator, trait_ref, target_ty) else {
+ let (Some(generator_did), Some(trait_ref), Some(target_ty)) =
+ (generator, trait_ref, target_ty)
+ else {
return false;
};
@@ -2403,8 +2453,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err.clear_code();
err.set_primary_message(format!(
- "{} cannot be {} between threads safely",
- future_or_generator, trait_verb
+ "{future_or_generator} cannot be {trait_verb} between threads safely"
));
let original_span = err.span.primary_span().unwrap();
@@ -2413,7 +2462,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let message = outer_generator
.and_then(|generator_did| {
Some(match self.tcx.generator_kind(generator_did).unwrap() {
- GeneratorKind::Gen => format!("generator is not {}", trait_name),
+ GeneratorKind::Gen => format!("generator is not {trait_name}"),
GeneratorKind::Async(AsyncGeneratorKind::Fn) => self
.tcx
.parent(generator_did)
@@ -2421,73 +2470,73 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
.map(|parent_did| hir.local_def_id_to_hir_id(parent_did))
.and_then(|parent_hir_id| hir.opt_name(parent_hir_id))
.map(|name| {
- format!("future returned by `{}` is not {}", name, trait_name)
+ format!("future returned by `{name}` is not {trait_name}")
})?,
GeneratorKind::Async(AsyncGeneratorKind::Block) => {
- format!("future created by async block is not {}", trait_name)
+ format!("future created by async block is not {trait_name}")
}
GeneratorKind::Async(AsyncGeneratorKind::Closure) => {
- format!("future created by async closure is not {}", trait_name)
+ format!("future created by async closure is not {trait_name}")
}
})
})
- .unwrap_or_else(|| format!("{} is not {}", future_or_generator, trait_name));
+ .unwrap_or_else(|| format!("{future_or_generator} is not {trait_name}"));
span.push_span_label(original_span, message);
err.set_span(span);
- format!("is not {}", trait_name)
+ format!("is not {trait_name}")
} else {
format!("does not implement `{}`", trait_pred.print_modifiers_and_trait_path())
};
- let mut explain_yield =
- |interior_span: Span, yield_span: Span, scope_span: Option<Span>| {
- let mut span = MultiSpan::from_span(yield_span);
- let snippet = match source_map.span_to_snippet(interior_span) {
- // #70935: If snippet contains newlines, display "the value" instead
- // so that we do not emit complex diagnostics.
- Ok(snippet) if !snippet.contains('\n') => format!("`{}`", snippet),
- _ => "the value".to_string(),
- };
- // note: future is not `Send` as this value is used across an await
- // --> $DIR/issue-70935-complex-spans.rs:13:9
- // |
- // LL | baz(|| async {
- // | ______________-
- // | |
- // | |
- // LL | | foo(tx.clone());
- // LL | | }).await;
- // | | - ^^^^^^ await occurs here, with value maybe used later
- // | |__________|
- // | has type `closure` which is not `Send`
- // note: value is later dropped here
- // LL | | }).await;
- // | | ^
- //
- span.push_span_label(
- yield_span,
- format!("{} occurs here, with {} maybe used later", await_or_yield, snippet),
- );
- span.push_span_label(
- interior_span,
- format!("has type `{}` which {}", target_ty, trait_explanation),
- );
- if let Some(scope_span) = scope_span {
- let scope_span = source_map.end_point(scope_span);
+ let mut explain_yield = |interior_span: Span,
+ yield_span: Span,
+ scope_span: Option<Span>| {
+ let mut span = MultiSpan::from_span(yield_span);
+ let snippet = match source_map.span_to_snippet(interior_span) {
+ // #70935: If snippet contains newlines, display "the value" instead
+ // so that we do not emit complex diagnostics.
+ Ok(snippet) if !snippet.contains('\n') => format!("`{snippet}`"),
+ _ => "the value".to_string(),
+ };
+ // note: future is not `Send` as this value is used across an await
+ // --> $DIR/issue-70935-complex-spans.rs:13:9
+ // |
+ // LL | baz(|| async {
+ // | ______________-
+ // | |
+ // | |
+ // LL | | foo(tx.clone());
+ // LL | | }).await;
+ // | | - ^^^^^^ await occurs here, with value maybe used later
+ // | |__________|
+ // | has type `closure` which is not `Send`
+ // note: value is later dropped here
+ // LL | | }).await;
+ // | | ^
+ //
+ span.push_span_label(
+ yield_span,
+ format!("{await_or_yield} occurs here, with {snippet} maybe used later"),
+ );
+ span.push_span_label(
+ interior_span,
+ format!("has type `{target_ty}` which {trait_explanation}"),
+ );
+ if let Some(scope_span) = scope_span {
+ let scope_span = source_map.end_point(scope_span);
- let msg = format!("{} is later dropped here", snippet);
- span.push_span_label(scope_span, msg);
- }
- err.span_note(
+ let msg = format!("{snippet} is later dropped here");
+ span.push_span_label(scope_span, msg);
+ }
+ err.span_note(
span,
format!(
- "{} {} as this value is used across {}",
- future_or_generator, trait_explanation, an_await_or_yield
+ "{future_or_generator} {trait_explanation} as this value is used across {an_await_or_yield}"
),
);
- };
+ };
match interior_or_upvar_span {
GeneratorInteriorOrUpvar::Interior(interior_span, interior_extra_info) => {
if let Some((scope_span, yield_span, expr, from_awaited_ty)) = interior_extra_info {
@@ -2497,15 +2546,13 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
span.push_span_label(
await_span,
format!(
- "await occurs here on type `{}`, which {}",
- target_ty, trait_explanation
+ "await occurs here on type `{target_ty}`, which {trait_explanation}"
),
);
err.span_note(
span,
format!(
- "future {not_trait} as it awaits another future which {not_trait}",
- not_trait = trait_explanation
+ "future {trait_explanation} as it awaits another future which {trait_explanation}"
),
);
} else {
@@ -2588,18 +2635,16 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let ref_kind = if is_mut { "&mut" } else { "&" };
(
format!(
- "has type `{}` which {}, because `{}` is not `{}`",
- target_ty, trait_explanation, ref_ty, ref_ty_trait
+ "has type `{target_ty}` which {trait_explanation}, because `{ref_ty}` is not `{ref_ty_trait}`"
),
format!(
- "captured value {} because `{}` references cannot be sent unless their referent is `{}`",
- trait_explanation, ref_kind, ref_ty_trait
+ "captured value {trait_explanation} because `{ref_kind}` references cannot be sent unless their referent is `{ref_ty_trait}`"
),
)
}
None => (
- format!("has type `{}` which {}", target_ty, trait_explanation),
- format!("captured value {}", trait_explanation),
+ format!("has type `{target_ty}` which {trait_explanation}"),
+ format!("captured value {trait_explanation}"),
),
};
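// Illustrative sketch, not part of this patch: two style-only changes recur in
// this file: inline format-args captures (`format!("{x}")`, stable since Rust
// 1.58) and rustfmt's layout for `let ... else` (the construct was stabilized
// in Rust 1.65). The function and values below are hypothetical.

fn sketch_message(found: Option<&str>, trait_name: &str) -> String {
    let Some(found) = found else {
        return format!("future is not {trait_name}");
    };
    format!("`{found}` is not {trait_name}")
}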
@@ -2655,7 +2700,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
| ObligationCauseCode::MatchImpl(..)
| ObligationCauseCode::ReturnType
| ObligationCauseCode::ReturnValue(_)
- | ObligationCauseCode::BlockTailExpression(_)
+ | ObligationCauseCode::BlockTailExpression(..)
| ObligationCauseCode::AwaitableExpr(_)
| ObligationCauseCode::ForLoopIterator
| ObligationCauseCode::QuestionMark
@@ -2688,8 +2733,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
ObligationCauseCode::ObjectTypeBound(object_ty, region) => {
err.note(format!(
- "required so that the lifetime bound of `{}` for `{}` is satisfied",
- region, object_ty,
+ "required so that the lifetime bound of `{region}` for `{object_ty}` is satisfied",
));
}
ObligationCauseCode::ItemObligation(_)
@@ -2743,7 +2787,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// implement this trait and list them.
err.note(format!(
"`{short_item_name}` is a \"sealed trait\", because to implement \
- it you also need to implelement `{}`, which is not accessible; \
+ it you also need to implement `{}`, which is not accessible; \
this is usually done to force you to use one of the provided \
types that already implement it",
with_no_trimmed_paths!(tcx.def_path_str(def_id)),
@@ -2808,7 +2852,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err.note("all local variables must have a statically known size");
}
Some(Node::Local(hir::Local {
- init: Some(hir::Expr { kind: hir::ExprKind::Index(_, _), span, .. }),
+ init: Some(hir::Expr { kind: hir::ExprKind::Index(..), span, .. }),
..
})) => {
// When encountering an assignment of an unsized trait, like
@@ -3009,11 +3053,11 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let mut msg =
"required because it captures the following types: ".to_owned();
for ty in bound_tys.skip_binder() {
- with_forced_trimmed_paths!(write!(msg, "`{}`, ", ty).unwrap());
+ with_forced_trimmed_paths!(write!(msg, "`{ty}`, ").unwrap());
}
err.note(msg.trim_end_matches(", ").to_string())
}
- ty::GeneratorWitnessMIR(def_id, substs) => {
+ ty::GeneratorWitnessMIR(def_id, args) => {
use std::fmt::Write;
// FIXME: this is kind of an unusual format for rustc, can we make it more clear?
@@ -3022,8 +3066,8 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let mut msg =
"required because it captures the following types: ".to_owned();
for bty in tcx.generator_hidden_types(*def_id) {
- let ty = bty.subst(tcx, substs);
- write!(msg, "`{}`, ", ty).unwrap();
+ let ty = bty.instantiate(tcx, args);
+ write!(msg, "`{ty}`, ").unwrap();
}
err.note(msg.trim_end_matches(", ").to_string())
}
@@ -3082,7 +3126,6 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
ObligationCauseCode::ImplDerivedObligation(ref data) => {
let mut parent_trait_pred =
self.resolve_vars_if_possible(data.derived.parent_trait_pred);
- parent_trait_pred.remap_constness_diag(param_env);
let parent_def_id = parent_trait_pred.def_id();
let (self_ty, file) =
self.tcx.short_ty_string(parent_trait_pred.skip_binder().self_ty());
@@ -3350,7 +3393,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
Ty::new_projection(
self.tcx,
item_def_id,
- // Future::Output has no substs
+ // Future::Output has no args
[trait_pred.self_ty()],
)
});
@@ -3391,7 +3434,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
_ => return,
};
if let ty::Float(_) = trait_ref.skip_binder().self_ty().kind()
- && let ty::Infer(InferTy::IntVar(_)) = trait_ref.skip_binder().substs.type_at(1).kind()
+ && let ty::Infer(InferTy::IntVar(_)) = trait_ref.skip_binder().args.type_at(1).kind()
{
err.span_suggestion_verbose(
rhs_span.shrink_to_hi(),
@@ -3411,15 +3454,15 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let Some(diagnostic_name) = self.tcx.get_diagnostic_name(trait_pred.def_id()) else {
return;
};
- let (adt, substs) = match trait_pred.skip_binder().self_ty().kind() {
- ty::Adt(adt, substs) if adt.did().is_local() => (adt, substs),
+ let (adt, args) = match trait_pred.skip_binder().self_ty().kind() {
+ ty::Adt(adt, args) if adt.did().is_local() => (adt, args),
_ => return,
};
let can_derive = {
let is_derivable_trait = match diagnostic_name {
sym::Default => !adt.is_enum(),
sym::PartialEq | sym::PartialOrd => {
- let rhs_ty = trait_pred.skip_binder().trait_ref.substs.type_at(1);
+ let rhs_ty = trait_pred.skip_binder().trait_ref.args.type_at(1);
trait_pred.skip_binder().self_ty() == rhs_ty
}
sym::Eq | sym::Ord | sym::Clone | sym::Copy | sym::Hash | sym::Debug => true,
@@ -3428,8 +3471,8 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
is_derivable_trait &&
// Ensure all fields impl the trait.
adt.all_fields().all(|field| {
- let field_ty = field.ty(self.tcx, substs);
- let trait_substs = match diagnostic_name {
+ let field_ty = field.ty(self.tcx, args);
+ let trait_args = match diagnostic_name {
sym::PartialEq | sym::PartialOrd => {
Some(field_ty)
}
@@ -3438,7 +3481,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let trait_pred = trait_pred.map_bound_ref(|tr| ty::TraitPredicate {
trait_ref: ty::TraitRef::new(self.tcx,
trait_pred.def_id(),
- [field_ty].into_iter().chain(trait_substs),
+ [field_ty].into_iter().chain(trait_args),
),
..*tr
});
@@ -3459,7 +3502,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
trait_pred.skip_binder().self_ty(),
diagnostic_name,
),
- format!("#[derive({})]\n", diagnostic_name),
+ format!("#[derive({diagnostic_name})]\n"),
Applicability::MaybeIncorrect,
);
}
@@ -3473,7 +3516,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
) {
if let ObligationCauseCode::ImplDerivedObligation(_) = obligation.cause.code()
&& self.tcx.is_diagnostic_item(sym::SliceIndex, trait_pred.skip_binder().trait_ref.def_id)
- && let ty::Slice(_) = trait_pred.skip_binder().trait_ref.substs.type_at(1).kind()
+ && let ty::Slice(_) = trait_pred.skip_binder().trait_ref.args.type_at(1).kind()
&& let ty::Ref(_, inner_ty, _) = trait_pred.skip_binder().self_ty().kind()
&& let ty::Uint(ty::UintTy::Usize) = inner_ty.kind()
{
@@ -3522,9 +3565,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// to an associated type (as seen from `trait_pred`) in the predicate. Like in
// trait_pred `S: Sum<<Self as Iterator>::Item>` and predicate `i32: Sum<&()>`
let mut type_diffs = vec![];
- if let ObligationCauseCode::ExprBindingObligation(def_id, _, _, idx) = parent_code.deref()
- && let Some(node_substs) = typeck_results.node_substs_opt(call_hir_id)
- && let where_clauses = self.tcx.predicates_of(def_id).instantiate(self.tcx, node_substs)
+ if let ObligationCauseCode::ExprBindingObligation(def_id, _, _, idx) = parent_code
+ && let Some(node_args) = typeck_results.node_args_opt(call_hir_id)
+ && let where_clauses = self.tcx.predicates_of(def_id).instantiate(self.tcx, node_args)
&& let Some(where_pred) = where_clauses.predicates.get(*idx)
{
if let Some(where_pred) = where_pred.as_trait_clause()
@@ -3538,7 +3581,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
);
let zipped =
- iter::zip(where_pred.trait_ref.substs, failed_pred.trait_ref.substs);
+ iter::zip(where_pred.trait_ref.args, failed_pred.trait_ref.args);
for (expected, actual) in zipped {
self.probe(|_| {
match self
@@ -3617,7 +3660,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let Some(typeck_results) = self.typeck_results.as_ref() else { return };
// Make sure we're dealing with the `Option` type.
- let Some(option_ty_adt) = typeck_results.expr_ty_adjusted(expr).ty_adt_def() else { return };
+ let Some(option_ty_adt) = typeck_results.expr_ty_adjusted(expr).ty_adt_def() else {
+ return;
+ };
if !tcx.is_diagnostic_item(sym::Option, option_ty_adt.did()) {
return;
}
@@ -3627,7 +3672,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
if let ty::PredicateKind::Clause(ty::ClauseKind::Trait(ty::TraitPredicate { trait_ref, .. }))
= failed_pred.kind().skip_binder()
&& tcx.is_fn_trait(trait_ref.def_id)
- && let [self_ty, found_ty] = trait_ref.substs.as_slice()
+ && let [self_ty, found_ty] = trait_ref.args.as_slice()
&& let Some(fn_ty) = self_ty.as_type().filter(|ty| ty.is_fn())
&& let fn_sig @ ty::FnSig {
abi: abi::Abi::Rust,
@@ -3647,7 +3692,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Extract `<U as Deref>::Target` assoc type and check that it is `T`
&& let Some(deref_target_did) = tcx.lang_items().deref_target()
- && let projection = Ty::new_projection(tcx,deref_target_did, tcx.mk_substs(&[ty::GenericArg::from(found_ty)]))
+ && let projection = Ty::new_projection(tcx,deref_target_did, tcx.mk_args(&[ty::GenericArg::from(found_ty)]))
&& let InferOk { value: deref_target, obligations } = infcx.at(&ObligationCause::dummy(), param_env).normalize(projection)
&& obligations.iter().all(|obligation| infcx.predicate_must_hold_modulo_regions(obligation))
&& infcx.can_eq(param_env, deref_target, target_ty)
@@ -3749,11 +3794,17 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
while let Some(assocs_in_method) = assocs.next() {
let Some(prev_assoc_in_method) = assocs.peek() else {
for entry in assocs_in_method {
- let Some((span, (assoc, ty))) = entry else { continue; };
- if primary_spans.is_empty() || type_diffs.iter().any(|diff| {
- let Sorts(expected_found) = diff else { return false; };
- self.can_eq(param_env, expected_found.found, ty)
- }) {
+ let Some((span, (assoc, ty))) = entry else {
+ continue;
+ };
+ if primary_spans.is_empty()
+ || type_diffs.iter().any(|diff| {
+ let Sorts(expected_found) = diff else {
+ return false;
+ };
+ self.can_eq(param_env, expected_found.found, ty)
+ })
+ {
// FIXME: this doesn't quite work for `Iterator::collect`
// because we have `Vec<i32>` and `()`, but we'd want `i32`
// to point at the `.into_iter()` call, but as long as we
@@ -3781,7 +3832,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let assoc = with_forced_trimmed_paths!(self.tcx.def_path_str(assoc));
if !self.can_eq(param_env, ty, *prev_ty) {
if type_diffs.iter().any(|diff| {
- let Sorts(expected_found) = diff else { return false; };
+ let Sorts(expected_found) = diff else {
+ return false;
+ };
self.can_eq(param_env, expected_found.found, ty)
}) {
primary_spans.push(span);
@@ -3829,15 +3882,19 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let ocx = ObligationCtxt::new(self.infcx);
let mut assocs_in_this_method = Vec::with_capacity(type_diffs.len());
for diff in type_diffs {
- let Sorts(expected_found) = diff else { continue; };
- let ty::Alias(ty::Projection, proj) = expected_found.expected.kind() else { continue; };
+ let Sorts(expected_found) = diff else {
+ continue;
+ };
+ let ty::Alias(ty::Projection, proj) = expected_found.expected.kind() else {
+ continue;
+ };
let origin = TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span };
let trait_def_id = proj.trait_def_id(self.tcx);
// Make `Self` be equivalent to the type of the call chain
// expression we're looking at now, so that we can tell what
// for example `Iterator::Item` is at this point in the chain.
- let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
+ let args = GenericArgs::for_item(self.tcx, trait_def_id, |param, _| {
match param.kind {
ty::GenericParamDefKind::Type { .. } => {
if param.index == 0 {
@@ -3855,7 +3912,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// This corresponds to `<ExprTy as Iterator>::Item = _`.
let projection = ty::Binder::dummy(ty::PredicateKind::Clause(
ty::ClauseKind::Projection(ty::ProjectionPredicate {
- projection_ty: self.tcx.mk_alias_ty(proj.def_id, substs),
+ projection_ty: self.tcx.mk_alias_ty(proj.def_id, args),
term: ty_var.into(),
}),
));
@@ -3886,13 +3943,22 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
/// If the type that failed selection is an array or a reference to an array,
/// but the trait is implemented for slices, suggest that the user converts
/// the array into a slice.
- fn maybe_suggest_convert_to_slice(
+ fn suggest_convert_to_slice(
&self,
err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
candidate_impls: &[ImplCandidate<'tcx>],
span: Span,
) {
+ // We can only suggest the slice coercion for function and binary operation arguments,
+ // since the suggestion would make no sense in turbofish or call
+ let (ObligationCauseCode::BinOp { .. }
+ | ObligationCauseCode::FunctionArgumentObligation { .. }) = obligation.cause.code()
+ else {
+ return;
+ };
+
// Three cases where we can make a suggestion:
// 1. `[T; _]` (array of T)
// 2. `&[T; _]` (reference to array of T)
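
The guard added at the start of this hunk uses an or-pattern inside `let ... else` to return early unless the obligation cause is one of the two accepted variants. A minimal sketch of that shape with a hypothetical enum:

    #[allow(dead_code)]
    enum CauseCode {
        BinOp,
        FunctionArgument { arg_idx: usize },
        Turbofish,
    }

    fn suggestion_applies(code: &CauseCode) -> bool {
        // Bail out early unless the cause is a binary operation or a function argument.
        let (CauseCode::BinOp | CauseCode::FunctionArgument { .. }) = code else {
            return false;
        };
        true
    }

    fn main() {
        assert!(suggestion_applies(&CauseCode::BinOp));
        assert!(suggestion_applies(&CauseCode::FunctionArgument { arg_idx: 0 }));
        assert!(!suggestion_applies(&CauseCode::Turbofish));
    }
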
@@ -3931,7 +3997,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
.map(|trait_ref| trait_ref.trait_ref.self_ty())
.find(|t| is_slice(*t))
{
- let msg = format!("convert the array to a `{}` slice instead", slice_ty);
+ let msg = format!("convert the array to a `{slice_ty}` slice instead");
if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
let mut suggestions = vec![];
@@ -3960,6 +4026,10 @@ fn hint_missing_borrow<'tcx>(
found_node: Node<'_>,
err: &mut Diagnostic,
) {
+ if matches!(found_node, Node::TraitItem(..)) {
+ return;
+ }
+
let found_args = match found.kind() {
ty::FnPtr(f) => infcx.instantiate_binder_with_placeholders(*f).inputs().iter(),
kind => {
@@ -3974,7 +4044,9 @@ fn hint_missing_borrow<'tcx>(
};
// This could be a variant constructor, for example.
- let Some(fn_decl) = found_node.fn_decl() else { return; };
+ let Some(fn_decl) = found_node.fn_decl() else {
+ return;
+ };
let args = fn_decl.inputs.iter();
@@ -4029,19 +4101,11 @@ fn hint_missing_borrow<'tcx>(
}
if !to_borrow.is_empty() {
- err.multipart_suggestion_verbose(
- "consider borrowing the argument",
- to_borrow,
- Applicability::MaybeIncorrect,
- );
+ err.subdiagnostic(errors::AdjustSignatureBorrow::Borrow { to_borrow });
}
if !remove_borrow.is_empty() {
- err.multipart_suggestion_verbose(
- "do not borrow the argument",
- remove_borrow,
- Applicability::MaybeIncorrect,
- );
+ err.subdiagnostic(errors::AdjustSignatureBorrow::RemoveBorrow { remove_borrow });
}
}
diff --git a/compiler/rustc_trait_selection/src/traits/fulfill.rs b/compiler/rustc_trait_selection/src/traits/fulfill.rs
index cf9d9315f..3ebf1246a 100644
--- a/compiler/rustc_trait_selection/src/traits/fulfill.rs
+++ b/compiler/rustc_trait_selection/src/traits/fulfill.rs
@@ -8,7 +8,7 @@ use rustc_infer::traits::{PolyTraitObligation, SelectionError, TraitEngine};
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::abstract_const::NotConstEvaluatable;
use rustc_middle::ty::error::{ExpectedFound, TypeError};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Binder, Const, TypeVisitableExt};
use std::marker::PhantomData;
@@ -410,8 +410,8 @@ impl<'a, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'tcx> {
}
}
- ty::PredicateKind::ClosureKind(_, closure_substs, kind) => {
- match self.selcx.infcx.closure_kind(closure_substs) {
+ ty::PredicateKind::ClosureKind(_, closure_args, kind) => {
+ match self.selcx.infcx.closure_kind(closure_args) {
Some(closure_kind) => {
if closure_kind.extends(kind) {
ProcessResult::Changed(vec![])
@@ -536,7 +536,7 @@ impl<'a, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'tcx> {
if let Ok(new_obligations) = infcx
.at(&obligation.cause, obligation.param_env)
.trace(c1, c2)
- .eq(DefineOpaqueTypes::No, a.substs, b.substs)
+ .eq(DefineOpaqueTypes::No, a.args, b.args)
{
return ProcessResult::Changed(mk_pending(
new_obligations.into_obligations(),
@@ -559,31 +559,30 @@ impl<'a, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'tcx> {
let stalled_on = &mut pending_obligation.stalled_on;
- let mut evaluate = |c: Const<'tcx>| {
- if let ty::ConstKind::Unevaluated(unevaluated) = c.kind() {
- match self.selcx.infcx.try_const_eval_resolve(
- obligation.param_env,
- unevaluated,
- c.ty(),
- Some(obligation.cause.span),
- ) {
- Ok(val) => Ok(val),
- Err(e) => match e {
- ErrorHandled::TooGeneric => {
- stalled_on.extend(
- unevaluated.substs.iter().filter_map(
+ let mut evaluate =
+ |c: Const<'tcx>| {
+ if let ty::ConstKind::Unevaluated(unevaluated) = c.kind() {
+ match self.selcx.infcx.try_const_eval_resolve(
+ obligation.param_env,
+ unevaluated,
+ c.ty(),
+ Some(obligation.cause.span),
+ ) {
+ Ok(val) => Ok(val),
+ Err(e) => match e {
+ ErrorHandled::TooGeneric => {
+ stalled_on.extend(unevaluated.args.iter().filter_map(
TyOrConstInferVar::maybe_from_generic_arg,
- ),
- );
- Err(ErrorHandled::TooGeneric)
- }
- _ => Err(e),
- },
+ ));
+ Err(ErrorHandled::TooGeneric)
+ }
+ _ => Err(e),
+ },
+ }
+ } else {
+ Ok(c)
}
- } else {
- Ok(c)
- }
- };
+ };
match (evaluate(c1), evaluate(c2)) {
(Ok(c1), Ok(c2)) => {
@@ -671,7 +670,7 @@ impl<'a, 'tcx> FulfillProcessor<'a, 'tcx> {
stalled_on: &mut Vec<TyOrConstInferVar<'tcx>>,
) -> ProcessResult<PendingPredicateObligation<'tcx>, FulfillmentErrorCode<'tcx>> {
let infcx = self.selcx.infcx;
- if obligation.predicate.is_global() {
+ if obligation.predicate.is_global() && !self.selcx.is_intercrate() {
// no type variables present, can use evaluation for better caching.
// FIXME: consider caching errors too.
if infcx.predicate_must_hold_considering_regions(obligation) {
@@ -696,9 +695,9 @@ impl<'a, 'tcx> FulfillProcessor<'a, 'tcx> {
// trait selection is because we don't have enough
// information about the types in the trait.
stalled_on.clear();
- stalled_on.extend(substs_infer_vars(
+ stalled_on.extend(args_infer_vars(
&self.selcx,
- trait_obligation.predicate.map_bound(|pred| pred.trait_ref.substs),
+ trait_obligation.predicate.map_bound(|pred| pred.trait_ref.args),
));
debug!(
@@ -725,7 +724,7 @@ impl<'a, 'tcx> FulfillProcessor<'a, 'tcx> {
) -> ProcessResult<PendingPredicateObligation<'tcx>, FulfillmentErrorCode<'tcx>> {
let tcx = self.selcx.tcx();
- if obligation.predicate.is_global() {
+ if obligation.predicate.is_global() && !self.selcx.is_intercrate() {
// no type variables present, can use evaluation for better caching.
// FIXME: consider caching errors too.
if self.selcx.infcx.predicate_must_hold_considering_regions(obligation) {
@@ -753,9 +752,9 @@ impl<'a, 'tcx> FulfillProcessor<'a, 'tcx> {
ProjectAndUnifyResult::Holds(os) => ProcessResult::Changed(mk_pending(os)),
ProjectAndUnifyResult::FailedNormalization => {
stalled_on.clear();
- stalled_on.extend(substs_infer_vars(
+ stalled_on.extend(args_infer_vars(
&self.selcx,
- project_obligation.predicate.map_bound(|pred| pred.projection_ty.substs),
+ project_obligation.predicate.map_bound(|pred| pred.projection_ty.args),
));
ProcessResult::Unchanged
}
@@ -770,14 +769,14 @@ impl<'a, 'tcx> FulfillProcessor<'a, 'tcx> {
}
}
-/// Returns the set of inference variables contained in `substs`.
-fn substs_infer_vars<'a, 'tcx>(
+/// Returns the set of inference variables contained in `args`.
+fn args_infer_vars<'a, 'tcx>(
selcx: &SelectionContext<'a, 'tcx>,
- substs: ty::Binder<'tcx, SubstsRef<'tcx>>,
+ args: ty::Binder<'tcx, GenericArgsRef<'tcx>>,
) -> impl Iterator<Item = TyOrConstInferVar<'tcx>> {
selcx
.infcx
- .resolve_vars_if_possible(substs)
+ .resolve_vars_if_possible(args)
.skip_binder() // ok because this check doesn't care about regions
.iter()
.filter(|arg| arg.has_non_region_infer())
diff --git a/compiler/rustc_trait_selection/src/traits/misc.rs b/compiler/rustc_trait_selection/src/traits/misc.rs
index e9cfd63e2..ab07b10c6 100644
--- a/compiler/rustc_trait_selection/src/traits/misc.rs
+++ b/compiler/rustc_trait_selection/src/traits/misc.rs
@@ -43,7 +43,7 @@ pub fn type_allowed_to_implement_copy<'tcx>(
self_type: Ty<'tcx>,
parent_cause: ObligationCause<'tcx>,
) -> Result<(), CopyImplementationError<'tcx>> {
- let (adt, substs) = match self_type.kind() {
+ let (adt, args) = match self_type.kind() {
// These types used to have a builtin impl.
// Now libcore provides that impl.
ty::Uint(_)
@@ -56,7 +56,7 @@ pub fn type_allowed_to_implement_copy<'tcx>(
| ty::Ref(_, _, hir::Mutability::Not)
| ty::Array(..) => return Ok(()),
- &ty::Adt(adt, substs) => (adt, substs),
+ &ty::Adt(adt, args) => (adt, args),
_ => return Err(CopyImplementationError::NotAnAdt),
};
@@ -66,7 +66,7 @@ pub fn type_allowed_to_implement_copy<'tcx>(
param_env,
self_type,
adt,
- substs,
+ args,
parent_cause,
hir::LangItem::Copy,
)
@@ -91,7 +91,7 @@ pub fn type_allowed_to_implement_const_param_ty<'tcx>(
self_type: Ty<'tcx>,
parent_cause: ObligationCause<'tcx>,
) -> Result<(), ConstParamTyImplementationError<'tcx>> {
- let (adt, substs) = match self_type.kind() {
+ let (adt, args) = match self_type.kind() {
// `core` provides these impls.
ty::Uint(_)
| ty::Int(_)
@@ -103,7 +103,7 @@ pub fn type_allowed_to_implement_const_param_ty<'tcx>(
| ty::Ref(.., hir::Mutability::Not)
| ty::Tuple(_) => return Ok(()),
- &ty::Adt(adt, substs) => (adt, substs),
+ &ty::Adt(adt, args) => (adt, args),
_ => return Err(ConstParamTyImplementationError::NotAnAdtOrBuiltinAllowed),
};
@@ -113,7 +113,7 @@ pub fn type_allowed_to_implement_const_param_ty<'tcx>(
param_env,
self_type,
adt,
- substs,
+ args,
parent_cause,
hir::LangItem::ConstParamTy,
)
@@ -128,7 +128,7 @@ pub fn all_fields_implement_trait<'tcx>(
param_env: ty::ParamEnv<'tcx>,
self_type: Ty<'tcx>,
adt: AdtDef<'tcx>,
- substs: &'tcx List<GenericArg<'tcx>>,
+ args: &'tcx List<GenericArg<'tcx>>,
parent_cause: ObligationCause<'tcx>,
lang_item: LangItem,
) -> Result<(), Vec<(&'tcx ty::FieldDef, Ty<'tcx>, InfringingFieldsReason<'tcx>)>> {
@@ -141,7 +141,7 @@ pub fn all_fields_implement_trait<'tcx>(
let infcx = tcx.infer_ctxt().build();
let ocx = traits::ObligationCtxt::new(&infcx);
- let unnormalized_ty = field.ty(tcx, substs);
+ let unnormalized_ty = field.ty(tcx, args);
if unnormalized_ty.references_error() {
continue;
}
@@ -154,11 +154,11 @@ pub fn all_fields_implement_trait<'tcx>(
// FIXME(compiler-errors): This gives us better spans for bad
// projection types like in issue-50480.
- // If the ADT has substs, point to the cause we are given.
+ // If the ADT has args, point to the cause we are given.
// If it does not, then this field probably doesn't normalize
// to begin with, and point to the bad field's span instead.
let normalization_cause = if field
- .ty(tcx, traits::InternalSubsts::identity_for_item(tcx, adt.did()))
+ .ty(tcx, traits::GenericArgs::identity_for_item(tcx, adt.did()))
.has_non_region_param()
{
parent_cause.clone()
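
The renamed parameters above feed the same field-by-field check: every field type of the ADT must satisfy the requested lang-item bound, and offending fields are collected for the error path. A toy model of that shape, with hypothetical field data and an `is_copy` flag standing in for the real trait check:

    struct Field {
        name: &'static str,
        is_copy: bool,
    }

    // Collect the fields that fail the bound; Ok(()) means the impl is allowed.
    fn infringing_fields(fields: &[Field]) -> Result<(), Vec<&'static str>> {
        let bad: Vec<_> = fields.iter().filter(|f| !f.is_copy).map(|f| f.name).collect();
        if bad.is_empty() { Ok(()) } else { Err(bad) }
    }

    fn main() {
        let fields = [
            Field { name: "id", is_copy: true },
            Field { name: "buf", is_copy: false },
        ];
        assert_eq!(infringing_fields(&fields), Err(vec!["buf"]));
    }
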
diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs
index 1af8323b6..d2210c6d5 100644
--- a/compiler/rustc_trait_selection/src/traits/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/mod.rs
@@ -13,12 +13,12 @@ mod object_safety;
pub mod outlives_bounds;
pub mod project;
pub mod query;
-#[cfg_attr(not(bootstrap), allow(hidden_glob_reexports))]
+#[allow(hidden_glob_reexports)]
mod select;
mod specialize;
mod structural_match;
mod structural_normalize;
-#[cfg_attr(not(bootstrap), allow(hidden_glob_reexports))]
+#[allow(hidden_glob_reexports)]
mod util;
pub mod vtable;
pub mod wf;
@@ -32,7 +32,7 @@ use rustc_middle::query::Providers;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::visit::{TypeVisitable, TypeVisitableExt};
use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt, TypeFolder, TypeSuperVisitable};
-use rustc_middle::ty::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::{GenericArgs, GenericArgsRef};
use rustc_span::def_id::DefId;
use rustc_span::Span;
@@ -61,13 +61,13 @@ pub use self::select::{EvaluationResult, IntercrateAmbiguityCause, OverflowError
pub use self::specialize::specialization_graph::FutureCompatOverlapError;
pub use self::specialize::specialization_graph::FutureCompatOverlapErrorKind;
pub use self::specialize::{
- specialization_graph, translate_substs, translate_substs_with_cause, OverlapError,
+ specialization_graph, translate_args, translate_args_with_cause, OverlapError,
};
pub use self::structural_match::search_for_structural_match_violation;
pub use self::structural_normalize::StructurallyNormalizeExt;
pub use self::util::elaborate;
pub use self::util::{
- check_substs_compatible, supertrait_def_ids, supertraits, transitive_bounds,
+ check_args_compatible, supertrait_def_ids, supertraits, transitive_bounds,
transitive_bounds_that_define_assoc_item, SupertraitDefIds,
};
pub use self::util::{expand_trait_aliases, TraitAliasExpander};
@@ -133,7 +133,7 @@ pub fn type_known_to_meet_bound_modulo_regions<'tcx>(
def_id: DefId,
) -> bool {
let trait_ref = ty::TraitRef::new(infcx.tcx, def_id, [ty]);
- pred_known_to_hold_modulo_regions(infcx, param_env, trait_ref.without_const())
+ pred_known_to_hold_modulo_regions(infcx, param_env, trait_ref)
}
/// FIXME(@lcnr): this function doesn't seem right and shouldn't exist?
@@ -328,11 +328,7 @@ pub fn normalize_param_env_or_error<'tcx>(
debug!("normalize_param_env_or_error: elaborated-predicates={:?}", predicates);
- let elaborated_env = ty::ParamEnv::new(
- tcx.mk_clauses(&predicates),
- unnormalized_env.reveal(),
- unnormalized_env.constness(),
- );
+ let elaborated_env = ty::ParamEnv::new(tcx.mk_clauses(&predicates), unnormalized_env.reveal());
// HACK: we are trying to normalize the param-env inside *itself*. The problem is that
// normalization expects its param-env to be already normalized, which means we have
@@ -362,12 +358,9 @@ pub fn normalize_param_env_or_error<'tcx>(
"normalize_param_env_or_error: predicates=(non-outlives={:?}, outlives={:?})",
predicates, outlives_predicates
);
- let Ok(non_outlives_predicates) = do_normalize_predicates(
- tcx,
- cause.clone(),
- elaborated_env,
- predicates,
- ) else {
+ let Ok(non_outlives_predicates) =
+ do_normalize_predicates(tcx, cause.clone(), elaborated_env, predicates)
+ else {
// An unnormalized env is better than nothing.
debug!("normalize_param_env_or_error: errored resolving non-outlives predicates");
return elaborated_env;
@@ -379,17 +372,11 @@ pub fn normalize_param_env_or_error<'tcx>(
// here. I believe they should not matter, because we are ignoring TypeOutlives param-env
// predicates here anyway. Keeping them here anyway because it seems safer.
let outlives_env = non_outlives_predicates.iter().chain(&outlives_predicates).cloned();
- let outlives_env = ty::ParamEnv::new(
- tcx.mk_clauses_from_iter(outlives_env),
- unnormalized_env.reveal(),
- unnormalized_env.constness(),
- );
- let Ok(outlives_predicates) = do_normalize_predicates(
- tcx,
- cause,
- outlives_env,
- outlives_predicates,
- ) else {
+ let outlives_env =
+ ty::ParamEnv::new(tcx.mk_clauses_from_iter(outlives_env), unnormalized_env.reveal());
+ let Ok(outlives_predicates) =
+ do_normalize_predicates(tcx, cause, outlives_env, outlives_predicates)
+ else {
// An unnormalized env is better than nothing.
debug!("normalize_param_env_or_error: errored resolving outlives predicates");
return elaborated_env;
@@ -399,11 +386,7 @@ pub fn normalize_param_env_or_error<'tcx>(
let mut predicates = non_outlives_predicates;
predicates.extend(outlives_predicates);
debug!("normalize_param_env_or_error: final predicates={:?}", predicates);
- ty::ParamEnv::new(
- tcx.mk_clauses(&predicates),
- unnormalized_env.reveal(),
- unnormalized_env.constness(),
- )
+ ty::ParamEnv::new(tcx.mk_clauses(&predicates), unnormalized_env.reveal())
}
/// Normalize a type and process all resulting obligations, returning any errors.
@@ -460,7 +443,7 @@ pub fn impossible_predicates<'tcx>(tcx: TyCtxt<'tcx>, predicates: Vec<ty::Clause
fn subst_and_check_impossible_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
- key: (DefId, SubstsRef<'tcx>),
+ key: (DefId, GenericArgsRef<'tcx>),
) -> bool {
debug!("subst_and_check_impossible_predicates(key={:?})", key);
@@ -480,11 +463,14 @@ fn subst_and_check_impossible_predicates<'tcx>(
result
}
-/// Checks whether a trait's method is impossible to call on a given impl.
+/// Checks whether a trait's associated item is impossible to reference on a given impl.
///
/// This only considers predicates that reference the impl's generics, and not
/// those that reference the method's generics.
-fn is_impossible_method(tcx: TyCtxt<'_>, (impl_def_id, trait_item_def_id): (DefId, DefId)) -> bool {
+fn is_impossible_associated_item(
+ tcx: TyCtxt<'_>,
+ (impl_def_id, trait_item_def_id): (DefId, DefId),
+) -> bool {
struct ReferencesOnlyParentGenerics<'tcx> {
tcx: TyCtxt<'tcx>,
generics: &'tcx ty::Generics,
@@ -527,7 +513,7 @@ fn is_impossible_method(tcx: TyCtxt<'_>, (impl_def_id, trait_item_def_id): (DefI
let impl_trait_ref = tcx
.impl_trait_ref(impl_def_id)
.expect("expected impl to correspond to trait")
- .subst_identity();
+ .instantiate_identity();
let param_env = tcx.param_env(impl_def_id);
let mut visitor = ReferencesOnlyParentGenerics { tcx, generics, trait_item_def_id };
@@ -537,7 +523,7 @@ fn is_impossible_method(tcx: TyCtxt<'_>, (impl_def_id, trait_item_def_id): (DefI
tcx,
ObligationCause::dummy_with_span(*span),
param_env,
- ty::EarlyBinder::bind(*pred).subst(tcx, impl_trait_ref.substs),
+ ty::EarlyBinder::bind(*pred).instantiate(tcx, impl_trait_ref.args),
)
})
});
@@ -562,7 +548,7 @@ pub fn provide(providers: &mut Providers) {
specializes: specialize::specializes,
subst_and_check_impossible_predicates,
check_tys_might_be_eq: misc::check_tys_might_be_eq,
- is_impossible_method,
+ is_impossible_associated_item,
..*providers
};
}
diff --git a/compiler/rustc_trait_selection/src/traits/object_safety.rs b/compiler/rustc_trait_selection/src/traits/object_safety.rs
index c31944c16..5823b4508 100644
--- a/compiler/rustc_trait_selection/src/traits/object_safety.rs
+++ b/compiler/rustc_trait_selection/src/traits/object_safety.rs
@@ -17,10 +17,10 @@ use rustc_errors::{DelayDm, FatalError, MultiSpan};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_middle::query::Providers;
-use rustc_middle::ty::subst::{GenericArg, InternalSubsts};
use rustc_middle::ty::{
self, EarlyBinder, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
};
+use rustc_middle::ty::{GenericArg, GenericArgs};
use rustc_middle::ty::{ToPredicate, TypeVisitableExt};
use rustc_session::lint::builtin::WHERE_CLAUSES_OBJECT_SAFETY;
use rustc_span::symbol::Symbol;
@@ -270,7 +270,7 @@ fn bounds_reference_self(tcx: TyCtxt<'_>, trait_def_id: DefId) -> SmallVec<[Span
tcx.associated_items(trait_def_id)
.in_definition_order()
.filter(|item| item.kind == ty::AssocKind::Type)
- .flat_map(|item| tcx.explicit_item_bounds(item.def_id).subst_identity_iter_copied())
+ .flat_map(|item| tcx.explicit_item_bounds(item.def_id).instantiate_identity_iter_copied())
.filter_map(|c| predicate_references_self(tcx, c))
.collect()
}
@@ -284,7 +284,7 @@ fn predicate_references_self<'tcx>(
match predicate.kind().skip_binder() {
ty::ClauseKind::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
- data.trait_ref.substs[1..].iter().any(has_self_ty).then_some(sp)
+ data.trait_ref.args[1..].iter().any(has_self_ty).then_some(sp)
}
ty::ClauseKind::Projection(ref data) => {
// And similarly for projections. This should be redundant with
@@ -302,7 +302,7 @@ fn predicate_references_self<'tcx>(
//
// This is ALT2 in issue #56288, see that for discussion of the
// possible alternatives.
- data.projection_ty.substs[1..].iter().any(has_self_ty).then_some(sp)
+ data.projection_ty.args[1..].iter().any(has_self_ty).then_some(sp)
}
ty::ClauseKind::ConstArgHasType(_ct, ty) => has_self_ty(&ty.into()).then_some(sp),
@@ -393,7 +393,7 @@ fn object_safety_violation_for_assoc_item(
ty::AssocKind::Type => {
if !tcx.features().generic_associated_types_extended
&& !tcx.generics_of(item.def_id).params.is_empty()
- && item.opt_rpitit_info.is_none()
+ && !item.is_impl_trait_in_trait()
{
Some(ObjectSafetyViolation::GAT(item.name, item.ident(tcx).span))
} else {
@@ -414,7 +414,7 @@ fn virtual_call_violation_for_method<'tcx>(
trait_def_id: DefId,
method: ty::AssocItem,
) -> Option<MethodViolationCode> {
- let sig = tcx.fn_sig(method.def_id).subst_identity();
+ let sig = tcx.fn_sig(method.def_id).instantiate_identity();
// The method's first parameter must be named `self`
if !method.fn_has_self_parameter {
@@ -517,8 +517,7 @@ fn virtual_call_violation_for_method<'tcx>(
tcx.sess.delay_span_bug(
tcx.def_span(method.def_id),
format!(
- "receiver when `Self = ()` should have a Scalar ABI; found {:?}",
- abi
+ "receiver when `Self = ()` should have a Scalar ABI; found {abi:?}"
),
);
}
@@ -536,8 +535,7 @@ fn virtual_call_violation_for_method<'tcx>(
tcx.sess.delay_span_bug(
tcx.def_span(method.def_id),
format!(
- "receiver when `Self = {}` should have a ScalarPair ABI; found {:?}",
- trait_object_ty, abi
+ "receiver when `Self = {trait_object_ty}` should have a ScalarPair ABI; found {abi:?}"
),
);
}
@@ -576,7 +574,6 @@ fn virtual_call_violation_for_method<'tcx>(
// implement auto traits if the underlying type does as well.
if let ty::ClauseKind::Trait(ty::TraitPredicate {
trait_ref: pred_trait_ref,
- constness: ty::BoundConstness::NotConst,
polarity: ty::ImplPolarity::Positive,
}) = pred.kind().skip_binder()
&& pred_trait_ref.self_ty() == tcx.types.self_param
@@ -586,7 +583,7 @@ fn virtual_call_violation_for_method<'tcx>(
// allowed to have generic parameters so `auto trait Bound<T> {}`
// would already have reported an error at the definition of the
// auto trait.
- if pred_trait_ref.substs.len() != 1 {
+ if pred_trait_ref.args.len() != 1 {
tcx.sess.diagnostic().delay_span_bug(
span,
"auto traits cannot have generic parameters",
@@ -612,11 +609,11 @@ fn receiver_for_self_ty<'tcx>(
method_def_id: DefId,
) -> Ty<'tcx> {
debug!("receiver_for_self_ty({:?}, {:?}, {:?})", receiver_ty, self_ty, method_def_id);
- let substs = InternalSubsts::for_item(tcx, method_def_id, |param, _| {
+ let args = GenericArgs::for_item(tcx, method_def_id, |param, _| {
if param.index == 0 { self_ty.into() } else { tcx.mk_param_from_def(param) }
});
- let result = EarlyBinder::bind(receiver_ty).subst(tcx, substs);
+ let result = EarlyBinder::bind(receiver_ty).instantiate(tcx, args);
debug!(
"receiver_for_self_ty({:?}, {:?}, {:?}) = {:?}",
receiver_ty, self_ty, method_def_id, result
@@ -751,21 +748,17 @@ fn receiver_is_dispatchable<'tcx>(
// U: Trait<Arg1, ..., ArgN>
let trait_predicate = {
let trait_def_id = method.trait_container(tcx).unwrap();
- let substs = InternalSubsts::for_item(tcx, trait_def_id, |param, _| {
+ let args = GenericArgs::for_item(tcx, trait_def_id, |param, _| {
if param.index == 0 { unsized_self_ty.into() } else { tcx.mk_param_from_def(param) }
});
- ty::TraitRef::new(tcx, trait_def_id, substs).to_predicate(tcx)
+ ty::TraitRef::new(tcx, trait_def_id, args).to_predicate(tcx)
};
let caller_bounds =
param_env.caller_bounds().iter().chain([unsize_predicate, trait_predicate]);
- ty::ParamEnv::new(
- tcx.mk_clauses_from_iter(caller_bounds),
- param_env.reveal(),
- param_env.constness(),
- )
+ ty::ParamEnv::new(tcx.mk_clauses_from_iter(caller_bounds), param_env.reveal())
};
// Receiver: DispatchFromDyn<Receiver[Self => U]>
diff --git a/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs
index ae6fc7cf8..32bbd626d 100644
--- a/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs
+++ b/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs
@@ -79,7 +79,8 @@ impl<'a, 'tcx: 'a> InferCtxtExt<'a, 'tcx> for InferCtxt<'tcx> {
&canonical_var_values,
canonical_result,
&mut constraints,
- ) else {
+ )
+ else {
return vec![];
};
assert_eq!(&obligations, &[]);
diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs
index a10bca31f..06a1027e5 100644
--- a/compiler/rustc_trait_selection/src/traits/project.rs
+++ b/compiler/rustc_trait_selection/src/traits/project.rs
@@ -1,10 +1,9 @@
//! Code for projecting associated types out of trait references.
-use super::check_substs_compatible;
+use super::check_args_compatible;
use super::specialization_graph;
-use super::translate_substs;
+use super::translate_args;
use super::util;
-use super::ImplSourceUserDefinedData;
use super::MismatchedProjectionTypes;
use super::Obligation;
use super::ObligationCause;
@@ -13,6 +12,9 @@ use super::Selection;
use super::SelectionContext;
use super::SelectionError;
use super::{Normalized, NormalizedTy, ProjectionCacheEntry, ProjectionCacheKey};
+use rustc_middle::traits::BuiltinImplSource;
+use rustc_middle::traits::ImplSource;
+use rustc_middle::traits::ImplSourceUserDefinedData;
use crate::errors::InherentProjectionNormalizationOverflow;
use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
@@ -131,8 +133,6 @@ enum ProjectionCandidate<'tcx> {
/// From an "impl" (or a "pseudo-impl" returned by select)
Select(Selection<'tcx>),
-
- ImplTraitInTrait(ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>>),
}
enum ProjectionCandidateSet<'tcx> {
@@ -483,8 +483,7 @@ impl<'a, 'b, 'tcx> AssocTypeNormalizer<'a, 'b, 'tcx> {
assert!(
!value.has_escaping_bound_vars(),
- "Normalizing {:?} without wrapping in a `Binder`",
- value
+ "Normalizing {value:?} without wrapping in a `Binder`"
);
if !needs_normalization(&value, self.param_env.reveal()) {
@@ -526,7 +525,7 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
// ```
// for<'a> fn(<T as Foo>::One<'a, Box<dyn Bar<'a, Item=<T as Foo>::Two<'a>>>>)
// ```
- // We normalize the substs on the projection before the projecting, but
+ // We normalize the args on the projection before the projecting, but
// if we're naive, we'll
// replace bound vars on inner, project inner, replace placeholders on inner,
// replace bound vars on outer, project outer, replace placeholders on outer
@@ -541,7 +540,7 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
//
// On the other hand, this does add a bit of complexity, since we only
// replace bound vars if the current type is a `Projection` and we need
- // to make sure we don't forget to fold the substs regardless.
+ // to make sure we don't forget to fold the args regardless.
match kind {
ty::Opaque => {
@@ -560,9 +559,9 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
);
}
- let substs = data.substs.fold_with(self);
+ let args = data.args.fold_with(self);
let generic_ty = self.interner().type_of(data.def_id);
- let concrete_ty = generic_ty.subst(self.interner(), substs);
+ let concrete_ty = generic_ty.instantiate(self.interner(), args);
self.depth += 1;
let folded_ty = self.fold_ty(concrete_ty);
self.depth -= 1;
@@ -662,11 +661,8 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
ty::Weak => {
let infcx = self.selcx.infcx;
self.obligations.extend(
- infcx
- .tcx
- .predicates_of(data.def_id)
- .instantiate_own(infcx.tcx, data.substs)
- .map(|(mut predicate, span)| {
+ infcx.tcx.predicates_of(data.def_id).instantiate_own(infcx.tcx, data.args).map(
+ |(mut predicate, span)| {
if data.has_escaping_bound_vars() {
(predicate, ..) = BoundVarReplacer::replace_bound_vars(
infcx,
@@ -679,9 +675,10 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
ObligationCauseCode::TypeAlias(code, span, data.def_id)
});
Obligation::new(infcx.tcx, cause, self.param_env, predicate)
- }),
+ },
+ ),
);
- infcx.tcx.type_of(data.def_id).subst(infcx.tcx, data.substs).fold_with(self)
+ infcx.tcx.type_of(data.def_id).instantiate(infcx.tcx, data.args).fold_with(self)
}
ty::Inherent if !data.has_escaping_bound_vars() => {
@@ -1217,7 +1214,7 @@ fn opt_normalize_projection_type<'a, 'b, 'tcx>(
let projected_term = selcx.infcx.resolve_vars_if_possible(projected_term);
- let mut result = if projected_term.has_projections() {
+ let result = if projected_term.has_projections() {
let mut normalizer = AssocTypeNormalizer::new(
selcx,
param_env,
@@ -1227,19 +1224,14 @@ fn opt_normalize_projection_type<'a, 'b, 'tcx>(
);
let normalized_ty = normalizer.fold(projected_term);
+ let mut deduped = SsoHashSet::with_capacity(projected_obligations.len());
+ projected_obligations.retain(|obligation| deduped.insert(obligation.clone()));
+
Normalized { value: normalized_ty, obligations: projected_obligations }
} else {
Normalized { value: projected_term, obligations: projected_obligations }
};
- let mut deduped: SsoHashSet<_> = Default::default();
- result.obligations.retain(|projected_obligation| {
- if !deduped.insert(projected_obligation.clone()) {
- return false;
- }
- true
- });
-
if use_cache {
infcx.inner.borrow_mut().projection_cache().insert_term(cache_key, result.clone());
}
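
The rewritten dedup above keeps only the first occurrence of each projected obligation by pairing `Vec::retain` with a hash set: `insert` returns false for repeats, so duplicates are dropped while order is preserved. The same pattern with std's `HashSet` in place of the compiler's `SsoHashSet`:

    use std::collections::HashSet;

    fn main() {
        let mut obligations = vec!["A: Sized", "B: Clone", "A: Sized", "C: Copy", "B: Clone"];
        // `insert` returns false when the item was already present, so `retain`
        // keeps only the first occurrence of each obligation, in order.
        let mut deduped = HashSet::with_capacity(obligations.len());
        obligations.retain(|obligation| deduped.insert(obligation.clone()));
        assert_eq!(obligations, ["A: Sized", "B: Clone", "C: Copy"]);
    }
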
@@ -1309,7 +1301,7 @@ fn normalize_to_error<'a, 'tcx>(
cause,
recursion_depth: depth,
param_env,
- predicate: trait_ref.without_const().to_predicate(selcx.tcx()),
+ predicate: trait_ref.to_predicate(selcx.tcx()),
};
let tcx = selcx.infcx.tcx;
let new_value = selcx.infcx.next_ty_var(TypeVariableOrigin {
@@ -1339,7 +1331,7 @@ pub fn normalize_inherent_projection<'a, 'b, 'tcx>(
});
}
- let substs = compute_inherent_assoc_ty_substs(
+ let args = compute_inherent_assoc_ty_args(
selcx,
param_env,
alias_ty,
@@ -1349,7 +1341,7 @@ pub fn normalize_inherent_projection<'a, 'b, 'tcx>(
);
// Register the obligations arising from the impl and from the associated type itself.
- let predicates = tcx.predicates_of(alias_ty.def_id).instantiate(tcx, substs);
+ let predicates = tcx.predicates_of(alias_ty.def_id).instantiate(tcx, args);
for (predicate, span) in predicates {
let predicate = normalize_with_depth_to(
selcx,
@@ -1383,7 +1375,7 @@ pub fn normalize_inherent_projection<'a, 'b, 'tcx>(
));
}
- let ty = tcx.type_of(alias_ty.def_id).subst(tcx, substs);
+ let ty = tcx.type_of(alias_ty.def_id).instantiate(tcx, args);
let mut ty = selcx.infcx.resolve_vars_if_possible(ty);
if ty.has_projections() {
@@ -1393,22 +1385,30 @@ pub fn normalize_inherent_projection<'a, 'b, 'tcx>(
ty
}
-pub fn compute_inherent_assoc_ty_substs<'a, 'b, 'tcx>(
+pub fn compute_inherent_assoc_ty_args<'a, 'b, 'tcx>(
selcx: &'a mut SelectionContext<'b, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
alias_ty: ty::AliasTy<'tcx>,
cause: ObligationCause<'tcx>,
depth: usize,
obligations: &mut Vec<PredicateObligation<'tcx>>,
-) -> ty::SubstsRef<'tcx> {
+) -> ty::GenericArgsRef<'tcx> {
let tcx = selcx.tcx();
let impl_def_id = tcx.parent(alias_ty.def_id);
- let impl_substs = selcx.infcx.fresh_substs_for_item(cause.span, impl_def_id);
+ let impl_args = selcx.infcx.fresh_args_for_item(cause.span, impl_def_id);
- let impl_ty = tcx.type_of(impl_def_id).subst(tcx, impl_substs);
- let impl_ty =
- normalize_with_depth_to(selcx, param_env, cause.clone(), depth + 1, impl_ty, obligations);
+ let mut impl_ty = tcx.type_of(impl_def_id).instantiate(tcx, impl_args);
+ if !selcx.infcx.next_trait_solver() {
+ impl_ty = normalize_with_depth_to(
+ selcx,
+ param_env,
+ cause.clone(),
+ depth + 1,
+ impl_ty,
+ obligations,
+ );
+ }
// Infer the generic parameters of the impl by unifying the
// impl type with the self type of the projection.
@@ -1425,7 +1425,7 @@ pub fn compute_inherent_assoc_ty_substs<'a, 'b, 'tcx>(
}
}
- alias_ty.rebase_substs_onto_impl(impl_substs, tcx)
+ alias_ty.rebase_inherent_args_onto_impl(impl_args, tcx)
}
enum Projected<'tcx> {
@@ -1472,8 +1472,6 @@ fn project<'cx, 'tcx>(
let mut candidates = ProjectionCandidateSet::None;
- assemble_candidate_for_impl_trait_in_trait(selcx, obligation, &mut candidates);
-
// Make sure that the following procedures are kept in order. ParamEnv
// needs to be first because it has highest priority, and Select checks
// the return value of push_candidate which assumes it's ran at last.
@@ -1499,20 +1497,18 @@ fn project<'cx, 'tcx>(
ProjectionCandidateSet::None => {
let tcx = selcx.tcx();
let term = match tcx.def_kind(obligation.predicate.def_id) {
- DefKind::AssocTy | DefKind::ImplTraitPlaceholder => Ty::new_projection(
- tcx,
- obligation.predicate.def_id,
- obligation.predicate.substs,
- )
- .into(),
+ DefKind::AssocTy => {
+ Ty::new_projection(tcx, obligation.predicate.def_id, obligation.predicate.args)
+ .into()
+ }
DefKind::AssocConst => ty::Const::new_unevaluated(
tcx,
ty::UnevaluatedConst::new(
obligation.predicate.def_id,
- obligation.predicate.substs,
+ obligation.predicate.args,
),
tcx.type_of(obligation.predicate.def_id)
- .subst(tcx, obligation.predicate.substs),
+ .instantiate(tcx, obligation.predicate.args),
)
.into(),
kind => {
@@ -1530,47 +1526,6 @@ fn project<'cx, 'tcx>(
}
}
-/// If the predicate's item is an `ImplTraitPlaceholder`, we do a select on the
-/// corresponding trait ref. If this yields an `impl`, then we're able to project
-/// to a concrete type, since we have an `impl`'s method to provide the RPITIT.
-fn assemble_candidate_for_impl_trait_in_trait<'cx, 'tcx>(
- selcx: &mut SelectionContext<'cx, 'tcx>,
- obligation: &ProjectionTyObligation<'tcx>,
- candidate_set: &mut ProjectionCandidateSet<'tcx>,
-) {
- let tcx = selcx.tcx();
- if tcx.def_kind(obligation.predicate.def_id) == DefKind::ImplTraitPlaceholder {
- let trait_fn_def_id = tcx.impl_trait_in_trait_parent_fn(obligation.predicate.def_id);
-
- let trait_def_id = tcx.parent(trait_fn_def_id);
- let trait_substs =
- obligation.predicate.substs.truncate_to(tcx, tcx.generics_of(trait_def_id));
- let trait_predicate = ty::TraitRef::new(tcx, trait_def_id, trait_substs);
-
- let _ = selcx.infcx.commit_if_ok(|_| {
- match selcx.select(&obligation.with(tcx, trait_predicate)) {
- Ok(Some(super::ImplSource::UserDefined(data))) => {
- candidate_set.push_candidate(ProjectionCandidate::ImplTraitInTrait(data));
- Ok(())
- }
- Ok(None) => {
- candidate_set.mark_ambiguous();
- Err(())
- }
- Ok(Some(_)) => {
- // Don't know enough about the impl to provide a useful signature
- Err(())
- }
- Err(e) => {
- debug!(error = ?e, "selection error");
- candidate_set.mark_error(e);
- Err(())
- }
- }
- });
- }
-}
-
/// The first thing we have to do is scan through the parameter
/// environment to see whether there are any projection predicates
/// there that can answer this question.
@@ -1612,7 +1567,7 @@ fn assemble_candidates_from_trait_def<'cx, 'tcx>(
let bounds = match *obligation.predicate.self_ty().kind() {
// Excluding IATs and type aliases here as they don't have meaningful item bounds.
ty::Alias(ty::Projection | ty::Opaque, ref data) => {
- tcx.item_bounds(data.def_id).subst(tcx, data.substs)
+ tcx.item_bounds(data.def_id).instantiate(tcx, data.args)
}
ty::Infer(ty::TyVar(_)) => {
// If the self-type is an inference variable, then it MAY wind up
@@ -1739,11 +1694,6 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
obligation: &ProjectionTyObligation<'tcx>,
candidate_set: &mut ProjectionCandidateSet<'tcx>,
) {
- // Can't assemble candidate from impl for RPITIT
- if selcx.tcx().def_kind(obligation.predicate.def_id) == DefKind::ImplTraitPlaceholder {
- return;
- }
-
// If we are resolving `<T as TraitRef<...>>::Item == Type`,
// start out by selecting the predicate `T as TraitRef<...>`:
let trait_ref = obligation.predicate.trait_ref(selcx.tcx());
@@ -1763,7 +1713,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
};
let eligible = match &impl_source {
- super::ImplSource::UserDefined(impl_data) => {
+ ImplSource::UserDefined(impl_data) => {
// We have to be careful when projecting out of an
// impl because of specialization. If we are not in
// codegen (i.e., projection mode is not "any"), and the
@@ -1813,7 +1763,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
}
}
}
- super::ImplSource::Builtin(..) => {
+ ImplSource::Builtin(BuiltinImplSource::Misc, _) => {
// While a builtin impl may be known to exist, the associated type may not yet
// be known. Any type with multiple potential associated types is therefore
// not eligible.
@@ -1912,8 +1862,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
if selcx.infcx.predicate_must_hold_modulo_regions(
&obligation.with(
selcx.tcx(),
- ty::TraitRef::from_lang_item(selcx.tcx(), LangItem::Sized, obligation.cause.span(),[self_ty])
- .without_const(),
+ ty::TraitRef::from_lang_item(selcx.tcx(), LangItem::Sized, obligation.cause.span(),[self_ty]),
),
) =>
{
@@ -1937,7 +1886,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
bug!("unexpected builtin trait with associated type: {trait_ref:?}")
}
}
- super::ImplSource::Param(..) => {
+ ImplSource::Param(..) => {
// This case tell us nothing about the value of an
// associated type. Consider:
//
@@ -1965,17 +1914,18 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
// in `assemble_candidates_from_param_env`.
false
}
- super::ImplSource::Object(_) => {
+ ImplSource::Builtin(BuiltinImplSource::Object { .. }, _) => {
// Handled by the `Object` projection candidate. See
// `assemble_candidates_from_object_ty` for an explanation of
// why we special case object types.
false
}
- | super::ImplSource::TraitUpcasting(_) => {
+ ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { .. }, _)
+ | ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, _) => {
// These traits have no associated types.
selcx.tcx().sess.delay_span_bug(
obligation.cause.span,
- format!("Cannot project an associated type from `{:?}`", impl_source),
+ format!("Cannot project an associated type from `{impl_source:?}`"),
);
return Err(());
}
@@ -2012,9 +1962,6 @@ fn confirm_candidate<'cx, 'tcx>(
ProjectionCandidate::Select(impl_source) => {
confirm_select_candidate(selcx, obligation, impl_source)
}
- ProjectionCandidate::ImplTraitInTrait(data) => {
- confirm_impl_trait_in_trait_candidate(selcx, obligation, data)
- }
};
// When checking for cycle during evaluation, we compare predicates with
@@ -2034,8 +1981,8 @@ fn confirm_select_candidate<'cx, 'tcx>(
impl_source: Selection<'tcx>,
) -> Progress<'tcx> {
match impl_source {
- super::ImplSource::UserDefined(data) => confirm_impl_candidate(selcx, obligation, data),
- super::ImplSource::Builtin(data) => {
+ ImplSource::UserDefined(data) => confirm_impl_candidate(selcx, obligation, data),
+ ImplSource::Builtin(BuiltinImplSource::Misc, data) => {
let trait_def_id = obligation.predicate.trait_def_id(selcx.tcx());
let lang_items = selcx.tcx().lang_items();
if lang_items.gen_trait() == Some(trait_def_id) {
@@ -2052,9 +1999,10 @@ fn confirm_select_candidate<'cx, 'tcx>(
confirm_builtin_candidate(selcx, obligation, data)
}
}
- super::ImplSource::Object(_)
- | super::ImplSource::Param(..)
- | super::ImplSource::TraitUpcasting(_) => {
+ ImplSource::Builtin(BuiltinImplSource::Object { .. }, _)
+ | ImplSource::Param(..)
+ | ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { .. }, _)
+ | ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, _) => {
// we don't create Select candidates with this kind of resolution
span_bug!(
obligation.cause.span,
@@ -2070,12 +2018,12 @@ fn confirm_generator_candidate<'cx, 'tcx>(
obligation: &ProjectionTyObligation<'tcx>,
nested: Vec<PredicateObligation<'tcx>>,
) -> Progress<'tcx> {
- let ty::Generator(_, substs, _) =
+ let ty::Generator(_, args, _) =
selcx.infcx.shallow_resolve(obligation.predicate.self_ty()).kind()
else {
unreachable!()
};
- let gen_sig = substs.as_generator().poly_sig();
+ let gen_sig = args.as_generator().poly_sig();
let Normalized { value: gen_sig, obligations } = normalize_with_depth(
selcx,
obligation.param_env,
@@ -2107,7 +2055,7 @@ fn confirm_generator_candidate<'cx, 'tcx>(
};
ty::ProjectionPredicate {
- projection_ty: tcx.mk_alias_ty(obligation.predicate.def_id, trait_ref.substs),
+ projection_ty: tcx.mk_alias_ty(obligation.predicate.def_id, trait_ref.args),
term: ty.into(),
}
});
@@ -2122,12 +2070,12 @@ fn confirm_future_candidate<'cx, 'tcx>(
obligation: &ProjectionTyObligation<'tcx>,
nested: Vec<PredicateObligation<'tcx>>,
) -> Progress<'tcx> {
- let ty::Generator(_, substs, _) =
+ let ty::Generator(_, args, _) =
selcx.infcx.shallow_resolve(obligation.predicate.self_ty()).kind()
else {
unreachable!()
};
- let gen_sig = substs.as_generator().poly_sig();
+ let gen_sig = args.as_generator().poly_sig();
let Normalized { value: gen_sig, obligations } = normalize_with_depth(
selcx,
obligation.param_env,
@@ -2151,7 +2099,7 @@ fn confirm_future_candidate<'cx, 'tcx>(
debug_assert_eq!(tcx.associated_item(obligation.predicate.def_id).name, sym::Output);
ty::ProjectionPredicate {
- projection_ty: tcx.mk_alias_ty(obligation.predicate.def_id, trait_ref.substs),
+ projection_ty: tcx.mk_alias_ty(obligation.predicate.def_id, trait_ref.args),
term: return_ty.into(),
}
});
@@ -2168,7 +2116,7 @@ fn confirm_builtin_candidate<'cx, 'tcx>(
) -> Progress<'tcx> {
let tcx = selcx.tcx();
let self_ty = obligation.predicate.self_ty();
- let substs = tcx.mk_substs(&[self_ty.into()]);
+ let args = tcx.mk_args(&[self_ty.into()]);
let lang_items = tcx.lang_items();
let item_def_id = obligation.predicate.def_id;
let trait_def_id = tcx.trait_of_item(item_def_id).unwrap();
@@ -2198,8 +2146,7 @@ fn confirm_builtin_candidate<'cx, 'tcx>(
LangItem::Sized,
obligation.cause.span(),
[self_ty],
- )
- .without_const();
+ );
obligations.push(obligation.with(tcx, sized_predicate));
}
(metadata_ty.into(), obligations)
@@ -2208,7 +2155,7 @@ fn confirm_builtin_candidate<'cx, 'tcx>(
};
let predicate =
- ty::ProjectionPredicate { projection_ty: tcx.mk_alias_ty(item_def_id, substs), term };
+ ty::ProjectionPredicate { projection_ty: tcx.mk_alias_ty(item_def_id, args), term };
confirm_param_env_candidate(selcx, obligation, ty::Binder::dummy(predicate), false)
.with_addl_obligations(obligations)
@@ -2240,12 +2187,11 @@ fn confirm_closure_candidate<'cx, 'tcx>(
obligation: &ProjectionTyObligation<'tcx>,
nested: Vec<PredicateObligation<'tcx>>,
) -> Progress<'tcx> {
- let ty::Closure(_, substs) =
- selcx.infcx.shallow_resolve(obligation.predicate.self_ty()).kind()
+ let ty::Closure(_, args) = selcx.infcx.shallow_resolve(obligation.predicate.self_ty()).kind()
else {
unreachable!()
};
- let closure_sig = substs.as_closure().sig();
+ let closure_sig = args.as_closure().sig();
let Normalized { value: closure_sig, obligations } = normalize_with_depth(
selcx,
obligation.param_env,
@@ -2282,7 +2228,7 @@ fn confirm_callable_candidate<'cx, 'tcx>(
flag,
)
.map_bound(|(trait_ref, ret_type)| ty::ProjectionPredicate {
- projection_ty: tcx.mk_alias_ty(fn_once_output_def_id, trait_ref.substs),
+ projection_ty: tcx.mk_alias_ty(fn_once_output_def_id, trait_ref.args),
term: ret_type.into(),
});
@@ -2349,8 +2295,7 @@ fn confirm_param_env_candidate<'cx, 'tcx>(
}
Err(e) => {
let msg = format!(
- "Failed to unify obligation `{:?}` with poly_projection `{:?}`: {:?}",
- obligation, poly_cache_entry, e,
+ "Failed to unify obligation `{obligation:?}` with poly_projection `{poly_cache_entry:?}`: {e:?}",
);
debug!("confirm_param_env_candidate: {}", msg);
let err = Ty::new_error_with_message(infcx.tcx, obligation.cause.span, msg);
@@ -2366,7 +2311,7 @@ fn confirm_impl_candidate<'cx, 'tcx>(
) -> Progress<'tcx> {
let tcx = selcx.tcx();
- let ImplSourceUserDefinedData { impl_def_id, substs, mut nested } = impl_impl_source;
+ let ImplSourceUserDefinedData { impl_def_id, args, mut nested } = impl_impl_source;
let assoc_item_id = obligation.predicate.def_id;
let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
@@ -2390,23 +2335,22 @@ fn confirm_impl_candidate<'cx, 'tcx>(
// If we're trying to normalize `<Vec<u32> as X>::A<S>` using
//`impl<T> X for Vec<T> { type A<Y> = Box<Y>; }`, then:
//
- // * `obligation.predicate.substs` is `[Vec<u32>, S]`
- // * `substs` is `[u32]`
- // * `substs` ends up as `[u32, S]`
- let substs = obligation.predicate.substs.rebase_onto(tcx, trait_def_id, substs);
- let substs =
- translate_substs(selcx.infcx, param_env, impl_def_id, substs, assoc_ty.defining_node);
+ // * `obligation.predicate.args` is `[Vec<u32>, S]`
+ // * `args` is `[u32]`
+ // * `args` ends up as `[u32, S]`
+ let args = obligation.predicate.args.rebase_onto(tcx, trait_def_id, args);
+ let args = translate_args(selcx.infcx, param_env, impl_def_id, args, assoc_ty.defining_node);
let ty = tcx.type_of(assoc_ty.item.def_id);
let is_const = matches!(tcx.def_kind(assoc_ty.item.def_id), DefKind::AssocConst);
let term: ty::EarlyBinder<ty::Term<'tcx>> = if is_const {
let did = assoc_ty.item.def_id;
- let identity_substs = crate::traits::InternalSubsts::identity_for_item(tcx, did);
- let uv = ty::UnevaluatedConst::new(did, identity_substs);
+ let identity_args = crate::traits::GenericArgs::identity_for_item(tcx, did);
+ let uv = ty::UnevaluatedConst::new(did, identity_args);
ty.map_bound(|ty| ty::Const::new_unevaluated(tcx, uv, ty).into())
} else {
ty.map_bound(|ty| ty.into())
};
- if !check_substs_compatible(tcx, assoc_ty.item, substs) {
+ if !check_args_compatible(tcx, assoc_ty.item, args) {
let err = Ty::new_error_with_message(
tcx,
obligation.cause.span,
@@ -2415,107 +2359,10 @@ fn confirm_impl_candidate<'cx, 'tcx>(
Progress { term: err.into(), obligations: nested }
} else {
assoc_ty_own_obligations(selcx, obligation, &mut nested);
- Progress { term: term.subst(tcx, substs), obligations: nested }
+ Progress { term: term.instantiate(tcx, args), obligations: nested }
}
}
-fn confirm_impl_trait_in_trait_candidate<'tcx>(
- selcx: &mut SelectionContext<'_, 'tcx>,
- obligation: &ProjectionTyObligation<'tcx>,
- data: ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>>,
-) -> Progress<'tcx> {
- let tcx = selcx.tcx();
- let mut obligations = data.nested;
-
- let trait_fn_def_id = tcx.impl_trait_in_trait_parent_fn(obligation.predicate.def_id);
- let leaf_def = match specialization_graph::assoc_def(tcx, data.impl_def_id, trait_fn_def_id) {
- Ok(assoc_ty) => assoc_ty,
- Err(guar) => return Progress::error(tcx, guar),
- };
- // We don't support specialization for RPITITs anyways... yet.
- // Also don't try to project to an RPITIT that has no value
- if !leaf_def.is_final() || !leaf_def.item.defaultness(tcx).has_value() {
- return Progress { term: Ty::new_misc_error(tcx).into(), obligations };
- }
-
- // Use the default `impl Trait` for the trait, e.g., for a default trait body
- if leaf_def.item.container == ty::AssocItemContainer::TraitContainer {
- return Progress {
- term: Ty::new_opaque(tcx, obligation.predicate.def_id, obligation.predicate.substs)
- .into(),
- obligations,
- };
- }
-
- // Rebase from {trait}::{fn}::{opaque} to {impl}::{fn}::{opaque},
- // since `data.substs` are the impl substs.
- let impl_fn_substs =
- obligation.predicate.substs.rebase_onto(tcx, tcx.parent(trait_fn_def_id), data.substs);
- let impl_fn_substs = translate_substs(
- selcx.infcx,
- obligation.param_env,
- data.impl_def_id,
- impl_fn_substs,
- leaf_def.defining_node,
- );
-
- if !check_substs_compatible(tcx, leaf_def.item, impl_fn_substs) {
- let err = Ty::new_error_with_message(
- tcx,
- obligation.cause.span,
- "impl method and trait method have different parameters",
- );
- return Progress { term: err.into(), obligations };
- }
-
- let impl_fn_def_id = leaf_def.item.def_id;
-
- let cause = ObligationCause::new(
- obligation.cause.span,
- obligation.cause.body_id,
- super::ItemObligation(impl_fn_def_id),
- );
- let predicates = normalize_with_depth_to(
- selcx,
- obligation.param_env,
- cause.clone(),
- obligation.recursion_depth + 1,
- tcx.predicates_of(impl_fn_def_id).instantiate(tcx, impl_fn_substs),
- &mut obligations,
- );
- obligations.extend(predicates.into_iter().map(|(pred, span)| {
- Obligation::with_depth(
- tcx,
- ObligationCause::new(
- obligation.cause.span,
- obligation.cause.body_id,
- if span.is_dummy() {
- super::ItemObligation(impl_fn_def_id)
- } else {
- super::BindingObligation(impl_fn_def_id, span)
- },
- ),
- obligation.recursion_depth + 1,
- obligation.param_env,
- pred,
- )
- }));
-
- let ty = normalize_with_depth_to(
- selcx,
- obligation.param_env,
- cause.clone(),
- obligation.recursion_depth + 1,
- tcx.collect_return_position_impl_trait_in_trait_tys(impl_fn_def_id).map_or_else(
- |guar| Ty::new_error(tcx, guar),
- |tys| tys[&obligation.predicate.def_id].subst(tcx, impl_fn_substs),
- ),
- &mut obligations,
- );
-
- Progress { term: ty.into(), obligations }
-}
-
// Get obligations corresponding to the predicates from the where-clause of the
// associated type itself.
fn assoc_ty_own_obligations<'cx, 'tcx>(
@@ -2526,7 +2373,7 @@ fn assoc_ty_own_obligations<'cx, 'tcx>(
let tcx = selcx.tcx();
let predicates = tcx
.predicates_of(obligation.predicate.def_id)
- .instantiate_own(tcx, obligation.predicate.substs);
+ .instantiate_own(tcx, obligation.predicate.args);
for (predicate, span) in predicates {
let normalized = normalize_with_depth_to(
selcx,
diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
index 709c3f432..9484a50e3 100644
--- a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
@@ -49,8 +49,8 @@ pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
// (T1..Tn) and closures have same properties as T1..Tn --
// check if *all* of them are trivial.
ty::Tuple(tys) => tys.iter().all(|t| trivial_dropck_outlives(tcx, t)),
- ty::Closure(_, ref substs) => {
- trivial_dropck_outlives(tcx, substs.as_closure().tupled_upvars_ty())
+ ty::Closure(_, ref args) => {
+ trivial_dropck_outlives(tcx, args.as_closure().tupled_upvars_ty())
}
ty::Adt(def, _) => {
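
The closure arm above recurses into the upvar tuple, matching the comment a few lines up: tuples and closures are trivial for dropck only if every component type is. A toy model of that recursive check with hypothetical stand-in types:

    enum Ty {
        Int,
        Ref,
        NeedsDrop,
        Tuple(Vec<Ty>),
    }

    // A type is "trivial" only if none of its components need drop-checking.
    fn trivial_dropck_outlives(ty: &Ty) -> bool {
        match ty {
            Ty::Int | Ty::Ref => true,
            Ty::NeedsDrop => false,
            Ty::Tuple(tys) => tys.iter().all(trivial_dropck_outlives),
        }
    }

    fn main() {
        assert!(trivial_dropck_outlives(&Ty::Tuple(vec![Ty::Int, Ty::Ref])));
        assert!(!trivial_dropck_outlives(&Ty::Tuple(vec![Ty::Int, Ty::NeedsDrop])));
    }
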
@@ -237,8 +237,8 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
Ok::<_, NoSolution>(())
})?,
- ty::Closure(_, substs) => {
- if !substs.as_closure().is_valid() {
+ ty::Closure(_, args) => {
+ if !args.as_closure().is_valid() {
// By the time this code runs, all type variables ought to
// be fully resolved.
@@ -250,14 +250,14 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
}
rustc_data_structures::stack::ensure_sufficient_stack(|| {
- for ty in substs.as_closure().upvar_tys() {
+ for ty in args.as_closure().upvar_tys() {
dtorck_constraint_for_ty_inner(tcx, span, for_ty, depth + 1, ty, constraints)?;
}
Ok::<_, NoSolution>(())
})?
}
- ty::Generator(_, substs, _movability) => {
+ ty::Generator(_, args, _movability) => {
// rust-lang/rust#49918: types can be constructed, stored
// in the interior, and sit idle when generator yields
// (and is subsequently dropped).
@@ -281,7 +281,7 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
// derived from lifetimes attached to the upvars and resume
// argument, and we *do* incorporate those here.
- if !substs.as_generator().is_valid() {
+ if !args.as_generator().is_valid() {
// By the time this code runs, all type variables ought to
// be fully resolved.
tcx.sess.delay_span_bug(
@@ -291,29 +291,26 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
return Err(NoSolution);
}
- constraints.outlives.extend(
- substs
- .as_generator()
- .upvar_tys()
- .map(|t| -> ty::subst::GenericArg<'tcx> { t.into() }),
- );
- constraints.outlives.push(substs.as_generator().resume_ty().into());
+ constraints
+ .outlives
+ .extend(args.as_generator().upvar_tys().iter().map(ty::GenericArg::from));
+ constraints.outlives.push(args.as_generator().resume_ty().into());
}
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
let DropckConstraint { dtorck_types, outlives, overflows } =
tcx.at(span).adt_dtorck_constraint(def.did())?;
// FIXME: we can try to recursively `dtorck_constraint_on_ty`
// there, but that needs some way to handle cycles.
constraints
.dtorck_types
- .extend(dtorck_types.iter().map(|t| EarlyBinder::bind(*t).subst(tcx, substs)));
+ .extend(dtorck_types.iter().map(|t| EarlyBinder::bind(*t).instantiate(tcx, args)));
constraints
.outlives
- .extend(outlives.iter().map(|t| EarlyBinder::bind(*t).subst(tcx, substs)));
+ .extend(outlives.iter().map(|t| EarlyBinder::bind(*t).instantiate(tcx, args)));
constraints
.overflows
- .extend(overflows.iter().map(|t| EarlyBinder::bind(*t).subst(tcx, substs)));
+ .extend(overflows.iter().map(|t| EarlyBinder::bind(*t).instantiate(tcx, args)));
}
// Objects must be alive in order for their destructor
diff --git a/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs
index a50644bb7..65f32b1c4 100644
--- a/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs
@@ -1,5 +1,4 @@
use rustc_infer::traits::{TraitEngine, TraitEngineExt};
-use rustc_middle::ty;
use crate::infer::canonical::OriginalQueryValues;
use crate::infer::InferCtxt;
@@ -66,17 +65,7 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
) -> Result<EvaluationResult, OverflowError> {
let mut _orig_values = OriginalQueryValues::default();
- let param_env = match obligation.predicate.kind().skip_binder() {
- ty::PredicateKind::Clause(ty::ClauseKind::Trait(pred)) => {
- // we ignore the value set to it.
- let mut _constness = pred.constness;
- obligation
- .param_env
- .with_constness(_constness.and(obligation.param_env.constness()))
- }
- // constness has no effect on the given predicate.
- _ => obligation.param_env.without_const(),
- };
+ let param_env = obligation.param_env;
if self.next_trait_solver() {
self.probe(|snapshot| {
diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
index 7fe79fd86..87beaddc6 100644
--- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
@@ -61,8 +61,27 @@ impl<'cx, 'tcx> QueryNormalizeExt<'tcx> for At<'cx, 'tcx> {
self.cause,
);
+ // This is actually a consequence by the way `normalize_erasing_regions` works currently.
+ // Because it needs to call the `normalize_generic_arg_after_erasing_regions`, it folds
+ // through tys and consts in a `TypeFoldable`. Importantly, it skips binders, leaving us
+ // with trying to normalize with escaping bound vars.
+ //
+ // Here, we just add the universes that we *would* have created had we passed through the binders.
+ //
+ // We *could* replace escaping bound vars eagerly here, but it doesn't really seem necessary.
+ // The rest of the code is already set up to replace bound vars lazily,
+ // and only does so when we actually have to normalize.
+ let universes = if value.has_escaping_bound_vars() {
+ let mut max_visitor =
+ MaxEscapingBoundVarVisitor { outer_index: ty::INNERMOST, escaping: 0 };
+ value.visit_with(&mut max_visitor);
+ vec![None; max_visitor.escaping]
+ } else {
+ vec![]
+ };
+
if self.infcx.next_trait_solver() {
- match crate::solve::deeply_normalize(self, value) {
+ match crate::solve::deeply_normalize_with_skipped_universes(self, value, universes) {
Ok(value) => return Ok(Normalized { value, obligations: vec![] }),
Err(_errors) => {
return Err(NoSolution);
@@ -81,27 +100,9 @@ impl<'cx, 'tcx> QueryNormalizeExt<'tcx> for At<'cx, 'tcx> {
obligations: vec![],
cache: SsoHashMap::new(),
anon_depth: 0,
- universes: vec![],
+ universes,
};
- // This is actually a consequence by the way `normalize_erasing_regions` works currently.
- // Because it needs to call the `normalize_generic_arg_after_erasing_regions`, it folds
- // through tys and consts in a `TypeFoldable`. Importantly, it skips binders, leaving us
- // with trying to normalize with escaping bound vars.
- //
- // Here, we just add the universes that we *would* have created had we passed through the binders.
- //
- // We *could* replace escaping bound vars eagerly here, but it doesn't seem really necessary.
- // The rest of the code is already set up to be lazy about replacing bound vars,
- // and only when we actually have to normalize.
- if value.has_escaping_bound_vars() {
- let mut max_visitor =
- MaxEscapingBoundVarVisitor { outer_index: ty::INNERMOST, escaping: 0 };
- value.visit_with(&mut max_visitor);
- if max_visitor.escaping > 0 {
- normalizer.universes.extend((0..max_visitor.escaping).map(|_| None));
- }
- }
let result = value.try_fold_with(&mut normalizer);
info!(
"normalize::<{}>: result={:?} with {} obligations",
@@ -217,7 +218,7 @@ impl<'cx, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'cx, 'tcx>
};
// See note in `rustc_trait_selection::traits::project` about why we
- // wait to fold the substs.
+ // wait to fold the args.
// Wrap this in a closure so we don't accidentally return from the outer function
let res = match kind {
@@ -227,7 +228,7 @@ impl<'cx, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'cx, 'tcx>
Reveal::UserFacing => ty.try_super_fold_with(self)?,
Reveal::All => {
- let substs = data.substs.try_fold_with(self)?;
+ let args = data.args.try_fold_with(self)?;
let recursion_limit = self.interner().recursion_limit();
if !recursion_limit.value_within_limit(self.anon_depth) {
// A closure or generator may have itself as in its upvars.
@@ -243,14 +244,14 @@ impl<'cx, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'cx, 'tcx>
}
let generic_ty = self.interner().type_of(data.def_id);
- let concrete_ty = generic_ty.subst(self.interner(), substs);
+ let concrete_ty = generic_ty.instantiate(self.interner(), args);
self.anon_depth += 1;
if concrete_ty == ty {
bug!(
- "infinite recursion generic_ty: {:#?}, substs: {:#?}, \
+ "infinite recursion generic_ty: {:#?}, args: {:#?}, \
concrete_ty: {:#?}, ty: {:#?}",
generic_ty,
- substs,
+ args,
concrete_ty,
ty
);
@@ -298,7 +299,7 @@ impl<'cx, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'cx, 'tcx>
if !tcx.sess.opts.actually_rustdoc {
tcx.sess.delay_span_bug(
DUMMY_SP,
- format!("unexpected ambiguity: {:?} {:?}", c_data, result),
+ format!("unexpected ambiguity: {c_data:?} {result:?}"),
);
}
return Err(NoSolution);
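[Editor's note] For context on the `universes` computation added above: the visitor measures how far bound variables escape the value being normalized, and the normalizer pre-allocates one `None` universe per escaping level. A toy sketch of that counting idea on a made-up term type (`Term` and `max_escaping` are illustrative stand-ins, not rustc's `MaxEscapingBoundVarVisitor`):

enum Term {
    Bound(usize),      // de Bruijn index
    Lambda(Box<Term>), // one binder
    App(Box<Term>, Box<Term>),
}

fn max_escaping(term: &Term, depth: usize) -> usize {
    match term {
        // An index pointing past every enclosing binder escapes by the difference plus one.
        Term::Bound(idx) => idx.checked_sub(depth).map_or(0, |d| d + 1),
        Term::Lambda(body) => max_escaping(body, depth + 1),
        Term::App(f, a) => max_escaping(f, depth).max(max_escaping(a, depth)),
    }
}

fn main() {
    // `\x. (x applied-to ^1)`: the inner `^1` escapes the single binder by one level,
    // so one extra universe would be pre-allocated.
    let t = Term::Lambda(Box::new(Term::App(
        Box::new(Term::Bound(0)),
        Box::new(Term::Bound(1)),
    )));
    assert_eq!(max_escaping(&t, 0), 1);
}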
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs
index 44671a076..302b6016e 100644
--- a/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs
@@ -4,7 +4,7 @@ use rustc_hir::def_id::{DefId, CRATE_DEF_ID};
use rustc_infer::traits::Obligation;
use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::{ObligationCause, ObligationCauseCode};
-use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt, UserSelfTy, UserSubsts, UserType};
+use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt, UserArgs, UserSelfTy, UserType};
pub use rustc_middle::traits::query::type_op::AscribeUserType;
use rustc_span::{Span, DUMMY_SP};
@@ -47,8 +47,8 @@ pub fn type_op_ascribe_user_type_with_span<'tcx>(
let span = span.unwrap_or(DUMMY_SP);
match user_ty {
UserType::Ty(user_ty) => relate_mir_and_user_ty(ocx, param_env, span, mir_ty, user_ty)?,
- UserType::TypeOf(def_id, user_substs) => {
- relate_mir_and_user_substs(ocx, param_env, span, mir_ty, def_id, user_substs)?
+ UserType::TypeOf(def_id, user_args) => {
+ relate_mir_and_user_args(ocx, param_env, span, mir_ty, def_id, user_args)?
}
};
Ok(())
@@ -74,20 +74,19 @@ fn relate_mir_and_user_ty<'tcx>(
}
#[instrument(level = "debug", skip(ocx, param_env, span))]
-fn relate_mir_and_user_substs<'tcx>(
+fn relate_mir_and_user_args<'tcx>(
ocx: &ObligationCtxt<'_, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
span: Span,
mir_ty: Ty<'tcx>,
def_id: DefId,
- user_substs: UserSubsts<'tcx>,
+ user_args: UserArgs<'tcx>,
) -> Result<(), NoSolution> {
- let param_env = param_env.without_const();
- let UserSubsts { user_self_ty, substs } = user_substs;
+ let UserArgs { user_self_ty, args } = user_args;
let tcx = ocx.infcx.tcx;
let cause = ObligationCause::dummy_with_span(span);
- let ty = tcx.type_of(def_id).subst(tcx, substs);
+ let ty = tcx.type_of(def_id).instantiate(tcx, args);
let ty = ocx.normalize(&cause, param_env, ty);
debug!("relate_type_and_user_type: ty of def-id is {:?}", ty);
@@ -98,7 +97,7 @@ fn relate_mir_and_user_substs<'tcx>(
// Also, normalize the `instantiated_predicates`
// because otherwise we wind up with duplicate "type
// outlives" error messages.
- let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, substs);
+ let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, args);
debug!(?instantiated_predicates);
for (instantiated_predicate, predicate_span) in instantiated_predicates {
@@ -116,7 +115,7 @@ fn relate_mir_and_user_substs<'tcx>(
if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
let self_ty = ocx.normalize(&cause, param_env, self_ty);
- let impl_self_ty = tcx.type_of(impl_def_id).subst(tcx, substs);
+ let impl_self_ty = tcx.type_of(impl_def_id).instantiate(tcx, args);
let impl_self_ty = ocx.normalize(&cause, param_env, impl_self_ty);
ocx.eq(&cause, param_env, self_ty, impl_self_ty)?;
@@ -128,9 +127,9 @@ fn relate_mir_and_user_substs<'tcx>(
// In addition to proving the predicates, we have to
// prove that `ty` is well-formed -- this is because
- // the WF of `ty` is predicated on the substs being
+ // the WF of `ty` is predicated on the args being
// well-formed, and we haven't proven *that*. We don't
- // want to prove the WF of types from `substs` directly because they
+ // want to prove the WF of types from `args` directly because they
// haven't been normalized.
//
// FIXME(nmatsakis): Well, perhaps we should normalize
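[Editor's note] A surface-level sketch of what `relate_mir_and_user_args` enforces (ordinary stable Rust; `require_clone` is a made-up example, not part of this patch): a user-written type is only accepted if the definition, instantiated with the user's generic arguments, satisfies its predicates.

fn require_clone<T: Clone>(value: T) -> (T, T) {
    (value.clone(), value)
}

fn main() {
    // Accepted only because the instantiated predicate `String: Clone` holds.
    let f: fn(String) -> (String, String) = require_clone::<String>;
    let _ = f(String::from("hi"));
    // `require_clone::<std::fs::File>` would be rejected: the instantiated
    // predicate `File: Clone` does not hold.
}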
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
index 5420caee3..c99e018e9 100644
--- a/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
@@ -77,8 +77,7 @@ where
let pre_obligations = infcx.take_registered_region_obligations();
assert!(
pre_obligations.is_empty(),
- "scrape_region_constraints: incoming region obligations = {:#?}",
- pre_obligations,
+ "scrape_region_constraints: incoming region obligations = {pre_obligations:#?}",
);
let value = infcx.commit_if_ok(|_| {
@@ -92,7 +91,7 @@ where
} else {
Err(infcx.tcx.sess.delay_span_bug(
DUMMY_SP,
- format!("errors selecting obligation during MIR typeck: {:?}", errors),
+ format!("errors selecting obligation during MIR typeck: {errors:?}"),
))
}
})?;
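[Editor's note] The hunks above only switch to inline format arguments; both spellings produce identical strings, as this standalone sketch shows:

fn main() {
    let errors = vec!["E0308"];
    let old = format!("errors selecting obligation during MIR typeck: {:?}", errors);
    let new = format!("errors selecting obligation during MIR typeck: {errors:?}");
    assert_eq!(old, new);
}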
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs
index 988942633..59f4a22ac 100644
--- a/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs
@@ -31,16 +31,6 @@ impl<'tcx> super::QueryTypeOp<'tcx> for DropckOutlives<'tcx> {
tcx: TyCtxt<'tcx>,
canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, Self>>,
) -> Result<CanonicalQueryResponse<'tcx, Self::QueryResponse>, NoSolution> {
- // Subtle: note that we are not invoking
- // `infcx.at(...).dropck_outlives(...)` here, but rather the
- // underlying `dropck_outlives` query. This same underlying
- // query is also used by the
- // `infcx.at(...).dropck_outlives(...)` fn. Avoiding the
- // wrapper means we don't need an infcx in this code, which is
- // good because the interface doesn't give us one (so that we
- // know we are not registering any subregion relations or
- // other things).
-
// FIXME convert to the type expected by the `dropck_outlives`
// query. This should eventually be fixed by changing the
// *underlying query*.
diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
index d5f6aaa7f..e3da87a22 100644
--- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
@@ -124,11 +124,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
self.assemble_candidates_from_projected_tys(obligation, &mut candidates);
self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?;
- // Auto implementations have lower priority, so we only
- // consider triggering a default if there is no other impl that can apply.
- if candidates.vec.is_empty() {
- self.assemble_candidates_from_auto_impls(obligation, &mut candidates);
- }
+ self.assemble_candidates_from_auto_impls(obligation, &mut candidates);
}
debug!("candidate list size: {}", candidates.vec.len());
Ok(candidates)
@@ -158,9 +154,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.infcx
.probe(|_| self.match_projection_obligation_against_definition_bounds(obligation));
- candidates
- .vec
- .extend(result.into_iter().map(|(idx, constness)| ProjectionCandidate(idx, constness)));
+ // FIXME(effects) proper constness needed?
+ candidates.vec.extend(
+ result.into_iter().map(|idx| ProjectionCandidate(idx, ty::BoundConstness::NotConst)),
+ );
}
/// Given an obligation like `<SomeTrait for T>`, searches the obligations that the caller
@@ -209,7 +206,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
obligation: &PolyTraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
- // Okay to skip binder because the substs on generator types never
+ // Okay to skip binder because the args on generator types never
// touch bound regions, they just capture the in-scope
// type/region parameters.
let self_ty = obligation.self_ty().skip_binder();
@@ -261,14 +258,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
return;
};
- // Okay to skip binder because the substs on closure types never
+ // Okay to skip binder because the args on closure types never
// touch bound regions, they just capture the in-scope
// type/region parameters
match *obligation.self_ty().skip_binder().kind() {
- ty::Closure(def_id, closure_substs) => {
+ ty::Closure(def_id, closure_args) => {
let is_const = self.tcx().is_const_fn_raw(def_id);
debug!(?kind, ?obligation, "assemble_unboxed_candidates");
- match self.infcx.closure_kind(closure_substs) {
+ match self.infcx.closure_kind(closure_args) {
Some(closure_kind) => {
debug!(?closure_kind, "assemble_unboxed_candidates");
if closure_kind.extends(kind) {
@@ -351,7 +348,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
let drcx = DeepRejectCtxt { treat_obligation_params: TreatParams::ForLookup };
- let obligation_substs = obligation.predicate.skip_binder().trait_ref.substs;
+ let obligation_args = obligation.predicate.skip_binder().trait_ref.args;
self.tcx().for_each_relevant_impl(
obligation.predicate.def_id(),
obligation.predicate.skip_binder().trait_ref.self_ty(),
@@ -360,9 +357,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// consider a "quick reject". This avoids creating more types
// and so forth that we need to.
let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
- if !drcx
- .substs_refs_may_unify(obligation_substs, impl_trait_ref.skip_binder().substs)
- {
+ if !drcx.args_refs_may_unify(obligation_args, impl_trait_ref.skip_binder().args) {
return;
}
if self.reject_fn_ptr_impls(
@@ -374,7 +369,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
self.infcx.probe(|_| {
- if let Ok(_substs) = self.match_impl(impl_def_id, impl_trait_ref, obligation) {
+ if let Ok(_args) = self.match_impl(impl_def_id, impl_trait_ref, obligation) {
candidates.vec.push(ImplCandidate(impl_def_id));
}
});
@@ -402,8 +397,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
};
for &(predicate, _) in self.tcx().predicates_of(impl_def_id).predicates {
- let ty::ClauseKind::Trait(pred)
- = predicate.kind().skip_binder() else { continue };
+ let ty::ClauseKind::Trait(pred) = predicate.kind().skip_binder() else { continue };
if fn_ptr_trait != pred.trait_ref.def_id {
continue;
}
@@ -516,7 +510,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// for an example of a test case that exercises
// this path.
}
- ty::Infer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_) | ty::IntVar(_) | ty::FloatVar(_)) => {
// The auto impl might apply; we don't know.
candidates.ambiguous = true;
}
@@ -536,7 +530,63 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
- _ => candidates.vec.push(AutoImplCandidate),
+ ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!(
+ "asked to assemble auto trait candidates of unexpected type: {:?}",
+ self_ty
+ );
+ }
+
+ ty::Alias(_, _)
+ if candidates.vec.iter().any(|c| matches!(c, ProjectionCandidate(..))) =>
+ {
+ // We do not generate an auto impl candidate for `impl Trait`s which already
+ // reference our auto trait.
+ //
+ // For example during candidate assembly for `impl Send: Send`, we don't have
+ // to look at the constituent types for this opaque types to figure out that this
+ // trivially holds.
+ //
+ // Note that this is only sound as projection candidates of opaque types
+ // are always applicable for auto traits.
+ }
+ ty::Alias(_, _) => candidates.vec.push(AutoImplCandidate),
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Array(_, _)
+ | ty::Slice(_)
+ | ty::Adt(..)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Closure(_, _)
+ | ty::Generator(..)
+ | ty::Never
+ | ty::Tuple(_)
+ | ty::GeneratorWitness(_)
+ | ty::GeneratorWitnessMIR(..) => {
+ // Only consider auto impls if there are no manual impls for the root of `self_ty`.
+ //
+ // For example, we only consider auto candidates for `&i32: Auto` if no explicit impl
+ // for `&SomeType: Auto` exists. Due to E0321 the only crate where impls
+ // for `&SomeType: Auto` can be defined is the crate where `Auto` has been defined.
+ //
+ // Generally, we have to guarantee that for all `SimplifiedType`s the only crate
+ // which may define impls for that type is either the crate defining the type
+ // or the trait. This should be guaranteed by the orphan check.
+ let mut has_impl = false;
+ self.tcx().for_each_relevant_impl(def_id, self_ty, |_| has_impl = true);
+ if !has_impl {
+ candidates.vec.push(AutoImplCandidate)
+ }
+ }
+ ty::Error(_) => {} // do not add an auto trait impl for `ty::Error` for now.
}
}
}
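[Editor's note] The auto-impl rule added above can be restated with a standard auto trait. This is an illustrative sketch only; `Point` and `assert_send` are made-up names, not part of this patch.

#[allow(dead_code)]
struct Point {
    x: i32,
    y: i32,
}

fn assert_send<T: ?Sized + Send>() {}

fn main() {
    // No explicit `impl Send for Point` exists, so the obligation is proven by
    // the auto candidate: every constituent type (`i32`) is `Send`.
    assert_send::<Point>();
    // `assert_send::<std::rc::Rc<i32>>()` would be rejected instead: std opts
    // `Rc` out of `Send` with an explicit negative impl, so no auto candidate applies.
}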
@@ -651,7 +701,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let ty = traits::normalize_projection_type(
self,
param_env,
- tcx.mk_alias_ty(tcx.lang_items().deref_target()?, trait_ref.substs),
+ tcx.mk_alias_ty(tcx.lang_items().deref_target()?, trait_ref.args),
cause.clone(),
0,
// We're *intentionally* throwing these away,
@@ -689,13 +739,16 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// Don't add any candidates if there are bound regions.
return;
};
- let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
+ let target = obligation.predicate.skip_binder().trait_ref.args.type_at(1);
debug!(?source, ?target, "assemble_candidates_for_unsizing");
match (source.kind(), target.kind()) {
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
- (&ty::Dynamic(ref data_a, _, ty::Dyn), &ty::Dynamic(ref data_b, _, ty::Dyn)) => {
+ (
+ &ty::Dynamic(ref a_data, a_region, ty::Dyn),
+ &ty::Dynamic(ref b_data, b_region, ty::Dyn),
+ ) => {
// Upcast coercions permit several things:
//
// 1. Dropping auto traits, e.g., `Foo + Send` to `Foo`
@@ -707,19 +760,19 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
//
// We always perform upcasting coercions when we can because of reason
// #2 (region bounds).
- let auto_traits_compatible = data_b
+ let auto_traits_compatible = b_data
.auto_traits()
// All of a's auto traits need to be in b's auto traits.
- .all(|b| data_a.auto_traits().any(|a| a == b));
+ .all(|b| a_data.auto_traits().any(|a| a == b));
if auto_traits_compatible {
- let principal_def_id_a = data_a.principal_def_id();
- let principal_def_id_b = data_b.principal_def_id();
+ let principal_def_id_a = a_data.principal_def_id();
+ let principal_def_id_b = b_data.principal_def_id();
if principal_def_id_a == principal_def_id_b {
// no cyclic
candidates.vec.push(BuiltinUnsizeCandidate);
} else if principal_def_id_a.is_some() && principal_def_id_b.is_some() {
// not casual unsizing, now check whether this is trait upcasting coercion.
- let principal_a = data_a.principal().unwrap();
+ let principal_a = a_data.principal().unwrap();
let target_trait_did = principal_def_id_b.unwrap();
let source_trait_ref = principal_a.with_self_ty(self.tcx(), source);
if let Some(deref_trait_ref) = self.need_migrate_deref_output_trait_object(
@@ -735,9 +788,23 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
for (idx, upcast_trait_ref) in
util::supertraits(self.tcx(), source_trait_ref).enumerate()
{
- if upcast_trait_ref.def_id() == target_trait_did {
- candidates.vec.push(TraitUpcastingUnsizeCandidate(idx));
- }
+ self.infcx.probe(|_| {
+ if upcast_trait_ref.def_id() == target_trait_did
+ && let Ok(nested) = self.match_upcast_principal(
+ obligation,
+ upcast_trait_ref,
+ a_data,
+ b_data,
+ a_region,
+ b_region,
+ )
+ {
+ if nested.is_none() {
+ candidates.ambiguous = true;
+ }
+ candidates.vec.push(TraitUpcastingUnsizeCandidate(idx));
+ }
+ })
}
}
}
@@ -842,7 +909,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
) {
// If the predicate is `~const Destruct` in a non-const environment, we don't actually need
// to check anything. We'll short-circuit checking any obligations in confirmation, too.
- if !obligation.is_const() {
+ // FIXME(effects)
+ if true {
candidates.vec.push(ConstDestructCandidate(None));
return;
}
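[Editor's note] For the upcast reasoning in the unsizing hunks earlier in this file, here is a stable-Rust sketch of the "dropping auto traits" case (`take_debug` is a made-up helper, not part of this patch): the coercion is accepted because the principal trait is unchanged and the target's auto traits are a subset of the source's.

use std::fmt::Debug;

fn take_debug(_: &dyn Debug) {}

fn main() {
    let value: i32 = 7;
    // `i32: Debug + Send`, so this trait object carries the extra auto trait.
    let with_send: &(dyn Debug + Send) = &value;
    // Same principal trait, fewer auto traits in the target: the unsize coercion is accepted.
    take_debug(with_send);
}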
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
index 7adc29bbb..88d030033 100644
--- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -11,10 +11,10 @@ use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir::lang_items::LangItem;
use rustc_infer::infer::LateBoundRegionConversionTime::HigherRankedType;
use rustc_infer::infer::{DefineOpaqueTypes, InferOk};
-use rustc_middle::traits::SelectionOutputTypeParameterMismatch;
+use rustc_middle::traits::{BuiltinImplSource, SelectionOutputTypeParameterMismatch};
use rustc_middle::ty::{
- self, Binder, GenericParamDefKind, InternalSubsts, SubstsRef, ToPolyTraitRef, ToPredicate,
- TraitPredicate, TraitRef, Ty, TyCtxt, TypeVisitableExt,
+ self, GenericArgs, GenericArgsRef, GenericParamDefKind, ToPolyTraitRef, ToPredicate,
+ TraitPredicate, Ty, TyCtxt, TypeVisitableExt,
};
use rustc_span::def_id::DefId;
@@ -26,9 +26,9 @@ use crate::traits::vtable::{
};
use crate::traits::{
BuiltinDerivedObligation, ImplDerivedObligation, ImplDerivedObligationCause, ImplSource,
- ImplSourceObjectData, ImplSourceTraitUpcastingData, ImplSourceUserDefinedData, Normalized,
- Obligation, ObligationCause, OutputTypeParameterMismatch, PolyTraitObligation,
- PredicateObligation, Selection, SelectionError, TraitNotObjectSafe, Unimplemented,
+ ImplSourceUserDefinedData, Normalized, Obligation, ObligationCause,
+ OutputTypeParameterMismatch, PolyTraitObligation, PredicateObligation, Selection,
+ SelectionError, TraitNotObjectSafe, Unimplemented,
};
use super::BuiltinImplConditions;
@@ -48,18 +48,18 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let mut impl_src = match candidate {
BuiltinCandidate { has_nested } => {
let data = self.confirm_builtin_candidate(obligation, has_nested);
- ImplSource::Builtin(data)
+ ImplSource::Builtin(BuiltinImplSource::Misc, data)
}
TransmutabilityCandidate => {
let data = self.confirm_transmutability_candidate(obligation)?;
- ImplSource::Builtin(data)
+ ImplSource::Builtin(BuiltinImplSource::Misc, data)
}
ParamCandidate(param) => {
let obligations =
self.confirm_param_candidate(obligation, param.map_bound(|t| t.trait_ref));
- ImplSource::Param(obligations, param.skip_binder().constness)
+ ImplSource::Param(obligations)
}
ImplCandidate(impl_def_id) => {
@@ -68,64 +68,57 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
AutoImplCandidate => {
let data = self.confirm_auto_impl_candidate(obligation)?;
- ImplSource::Builtin(data)
+ ImplSource::Builtin(BuiltinImplSource::Misc, data)
}
- ProjectionCandidate(idx, constness) => {
+ ProjectionCandidate(idx, _) => {
let obligations = self.confirm_projection_candidate(obligation, idx)?;
- ImplSource::Param(obligations, constness)
+ ImplSource::Param(obligations)
}
- ObjectCandidate(idx) => {
- let data = self.confirm_object_candidate(obligation, idx)?;
- ImplSource::Object(data)
- }
+ ObjectCandidate(idx) => self.confirm_object_candidate(obligation, idx)?,
ClosureCandidate { .. } => {
let vtable_closure = self.confirm_closure_candidate(obligation)?;
- ImplSource::Builtin(vtable_closure)
+ ImplSource::Builtin(BuiltinImplSource::Misc, vtable_closure)
}
GeneratorCandidate => {
let vtable_generator = self.confirm_generator_candidate(obligation)?;
- ImplSource::Builtin(vtable_generator)
+ ImplSource::Builtin(BuiltinImplSource::Misc, vtable_generator)
}
FutureCandidate => {
let vtable_future = self.confirm_future_candidate(obligation)?;
- ImplSource::Builtin(vtable_future)
+ ImplSource::Builtin(BuiltinImplSource::Misc, vtable_future)
}
FnPointerCandidate { is_const } => {
let data = self.confirm_fn_pointer_candidate(obligation, is_const)?;
- ImplSource::Builtin(data)
+ ImplSource::Builtin(BuiltinImplSource::Misc, data)
}
TraitAliasCandidate => {
let data = self.confirm_trait_alias_candidate(obligation);
- ImplSource::Builtin(data)
+ ImplSource::Builtin(BuiltinImplSource::Misc, data)
}
BuiltinObjectCandidate => {
// This indicates something like `Trait + Send: Send`. In this case, we know that
// this holds because that's what the object type is telling us, and there's really
// no additional obligations to prove and no types in particular to unify, etc.
- ImplSource::Builtin(Vec::new())
+ ImplSource::Builtin(BuiltinImplSource::Misc, Vec::new())
}
- BuiltinUnsizeCandidate => {
- let data = self.confirm_builtin_unsize_candidate(obligation)?;
- ImplSource::Builtin(data)
- }
+ BuiltinUnsizeCandidate => self.confirm_builtin_unsize_candidate(obligation)?,
TraitUpcastingUnsizeCandidate(idx) => {
- let data = self.confirm_trait_upcasting_unsize_candidate(obligation, idx)?;
- ImplSource::TraitUpcasting(data)
+ self.confirm_trait_upcasting_unsize_candidate(obligation, idx)?
}
ConstDestructCandidate(def_id) => {
let data = self.confirm_const_destruct_candidate(obligation, def_id)?;
- ImplSource::Builtin(data)
+ ImplSource::Builtin(BuiltinImplSource::Misc, data)
}
};
@@ -135,14 +128,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
subobligation.set_depth_from_parent(obligation.recursion_depth);
}
- if !obligation.predicate.is_const_if_const() {
- // normalize nested predicates according to parent predicate's constness.
- impl_src = impl_src.map(|mut o| {
- o.predicate = o.predicate.without_const(self.tcx());
- o
- });
- }
-
Ok(impl_src)
}
@@ -158,15 +143,16 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
self.infcx.instantiate_binder_with_placeholders(trait_predicate).trait_ref;
let placeholder_self_ty = placeholder_trait_predicate.self_ty();
let placeholder_trait_predicate = ty::Binder::dummy(placeholder_trait_predicate);
- let (def_id, substs) = match *placeholder_self_ty.kind() {
+ let (def_id, args) = match *placeholder_self_ty.kind() {
// Excluding IATs and type aliases here as they don't have meaningful item bounds.
- ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
- (def_id, substs)
+ ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
+ (def_id, args)
}
_ => bug!("projection candidate for unexpected type: {:?}", placeholder_self_ty),
};
- let candidate_predicate = tcx.item_bounds(def_id).map_bound(|i| i[idx]).subst(tcx, substs);
+ let candidate_predicate =
+ tcx.item_bounds(def_id).map_bound(|i| i[idx]).instantiate(tcx, args);
let candidate = candidate_predicate
.as_trait_clause()
.expect("projection candidate is not a trait predicate")
@@ -190,7 +176,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
})?);
if let ty::Alias(ty::Projection, ..) = placeholder_self_ty.kind() {
- let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs);
+ let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, args);
for (predicate, _) in predicates {
let normalized = normalize_with_depth_to(
self,
@@ -298,8 +284,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.collect(),
Condition::IfTransmutable { src, dst } => {
let trait_def_id = obligation.predicate.def_id();
- let scope = predicate.trait_ref.substs.type_at(2);
- let assume_const = predicate.trait_ref.substs.const_at(3);
+ let scope = predicate.trait_ref.args.type_at(2);
+ let assume_const = predicate.trait_ref.args.const_at(3);
let make_obl = |from_ty, to_ty| {
let trait_ref1 = ty::TraitRef::new(
tcx,
@@ -342,19 +328,19 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let Some(assume) = rustc_transmute::Assume::from_const(
self.infcx.tcx,
obligation.param_env,
- predicate.trait_ref.substs.const_at(3)
+ predicate.trait_ref.args.const_at(3),
) else {
return Err(Unimplemented);
};
- let dst = predicate.trait_ref.substs.type_at(0);
- let src = predicate.trait_ref.substs.type_at(1);
+ let dst = predicate.trait_ref.args.type_at(0);
+ let src = predicate.trait_ref.args.type_at(1);
debug!(?src, ?dst);
let mut transmute_env = rustc_transmute::TransmuteTypeEnv::new(self.infcx);
let maybe_transmutable = transmute_env.is_transmutable(
obligation.cause.clone(),
rustc_transmute::Types { dst, src },
- predicate.trait_ref.substs.type_at(2),
+ predicate.trait_ref.args.type_at(2),
assume,
);
@@ -402,7 +388,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
obligation.recursion_depth + 1,
obligation.param_env,
trait_def_id,
- &trait_ref.substs,
+ &trait_ref.args,
obligation.predicate,
);
@@ -433,12 +419,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// First, create the substitutions by matching the impl again,
// this time not in a probe.
- let substs = self.rematch_impl(impl_def_id, obligation);
- debug!(?substs, "impl substs");
+ let args = self.rematch_impl(impl_def_id, obligation);
+ debug!(?args, "impl args");
ensure_sufficient_stack(|| {
self.vtable_impl(
impl_def_id,
- substs,
+ args,
&obligation.cause,
obligation.recursion_depth + 1,
obligation.param_env,
@@ -450,40 +436,40 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
fn vtable_impl(
&mut self,
impl_def_id: DefId,
- substs: Normalized<'tcx, SubstsRef<'tcx>>,
+ args: Normalized<'tcx, GenericArgsRef<'tcx>>,
cause: &ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
) -> ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>> {
- debug!(?impl_def_id, ?substs, ?recursion_depth, "vtable_impl");
+ debug!(?impl_def_id, ?args, ?recursion_depth, "vtable_impl");
let mut impl_obligations = self.impl_or_trait_obligations(
cause,
recursion_depth,
param_env,
impl_def_id,
- &substs.value,
+ &args.value,
parent_trait_pred,
);
debug!(?impl_obligations, "vtable_impl");
// Because of RFC447, the impl-trait-ref and obligations
- // are sufficient to determine the impl substs, without
+ // are sufficient to determine the impl args, without
// relying on projections in the impl-trait-ref.
//
// e.g., `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
- impl_obligations.extend(substs.obligations);
+ impl_obligations.extend(args.obligations);
- ImplSourceUserDefinedData { impl_def_id, substs: substs.value, nested: impl_obligations }
+ ImplSourceUserDefinedData { impl_def_id, args: args.value, nested: impl_obligations }
}
fn confirm_object_candidate(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
index: usize,
- ) -> Result<ImplSourceObjectData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ ) -> Result<ImplSource<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
let tcx = self.tcx();
debug!(?obligation, ?index, "confirm_object_candidate");
@@ -531,7 +517,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// will be checked in the code below.
for super_trait in tcx
.super_predicates_of(trait_predicate.def_id())
- .instantiate(tcx, trait_predicate.trait_ref.substs)
+ .instantiate(tcx, trait_predicate.trait_ref.args)
.predicates
.into_iter()
{
@@ -569,68 +555,65 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// higher-ranked things.
// Prevent, e.g., `dyn Iterator<Item = str>`.
for bound in self.tcx().item_bounds(assoc_type).transpose_iter() {
- let subst_bound =
- if defs.count() == 0 {
- bound.subst(tcx, trait_predicate.trait_ref.substs)
- } else {
- let mut substs = smallvec::SmallVec::with_capacity(defs.count());
- substs.extend(trait_predicate.trait_ref.substs.iter());
- let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> =
- smallvec::SmallVec::with_capacity(
- bound.skip_binder().kind().bound_vars().len() + defs.count(),
- );
- bound_vars.extend(bound.skip_binder().kind().bound_vars().into_iter());
- InternalSubsts::fill_single(&mut substs, defs, &mut |param, _| match param
- .kind
- {
- GenericParamDefKind::Type { .. } => {
- let kind = ty::BoundTyKind::Param(param.def_id, param.name);
- let bound_var = ty::BoundVariableKind::Ty(kind);
- bound_vars.push(bound_var);
- Ty::new_bound(
- tcx,
- ty::INNERMOST,
- ty::BoundTy {
- var: ty::BoundVar::from_usize(bound_vars.len() - 1),
- kind,
- },
- )
- .into()
- }
- GenericParamDefKind::Lifetime => {
- let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name);
- let bound_var = ty::BoundVariableKind::Region(kind);
- bound_vars.push(bound_var);
- ty::Region::new_late_bound(
- tcx,
- ty::INNERMOST,
- ty::BoundRegion {
- var: ty::BoundVar::from_usize(bound_vars.len() - 1),
- kind,
- },
- )
- .into()
- }
- GenericParamDefKind::Const { .. } => {
- let bound_var = ty::BoundVariableKind::Const;
- bound_vars.push(bound_var);
- ty::Const::new_bound(
- tcx,
- ty::INNERMOST,
- ty::BoundVar::from_usize(bound_vars.len() - 1),
- tcx.type_of(param.def_id)
- .no_bound_vars()
- .expect("const parameter types cannot be generic"),
- )
- .into()
- }
- });
- let bound_vars = tcx.mk_bound_variable_kinds(&bound_vars);
- let assoc_ty_substs = tcx.mk_substs(&substs);
- let bound =
- bound.map_bound(|b| b.kind().skip_binder()).subst(tcx, assoc_ty_substs);
- ty::Binder::bind_with_vars(bound, bound_vars).to_predicate(tcx)
- };
+ let subst_bound = if defs.count() == 0 {
+ bound.instantiate(tcx, trait_predicate.trait_ref.args)
+ } else {
+ let mut args = smallvec::SmallVec::with_capacity(defs.count());
+ args.extend(trait_predicate.trait_ref.args.iter());
+ let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> =
+ smallvec::SmallVec::with_capacity(
+ bound.skip_binder().kind().bound_vars().len() + defs.count(),
+ );
+ bound_vars.extend(bound.skip_binder().kind().bound_vars().into_iter());
+ GenericArgs::fill_single(&mut args, defs, &mut |param, _| match param.kind {
+ GenericParamDefKind::Type { .. } => {
+ let kind = ty::BoundTyKind::Param(param.def_id, param.name);
+ let bound_var = ty::BoundVariableKind::Ty(kind);
+ bound_vars.push(bound_var);
+ Ty::new_bound(
+ tcx,
+ ty::INNERMOST,
+ ty::BoundTy {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind,
+ },
+ )
+ .into()
+ }
+ GenericParamDefKind::Lifetime => {
+ let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name);
+ let bound_var = ty::BoundVariableKind::Region(kind);
+ bound_vars.push(bound_var);
+ ty::Region::new_late_bound(
+ tcx,
+ ty::INNERMOST,
+ ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind,
+ },
+ )
+ .into()
+ }
+ GenericParamDefKind::Const { .. } => {
+ let bound_var = ty::BoundVariableKind::Const;
+ bound_vars.push(bound_var);
+ ty::Const::new_bound(
+ tcx,
+ ty::INNERMOST,
+ ty::BoundVar::from_usize(bound_vars.len() - 1),
+ tcx.type_of(param.def_id)
+ .no_bound_vars()
+ .expect("const parameter types cannot be generic"),
+ )
+ .into()
+ }
+ });
+ let bound_vars = tcx.mk_bound_variable_kinds(&bound_vars);
+ let assoc_ty_args = tcx.mk_args(&args);
+ let bound =
+ bound.map_bound(|b| b.kind().skip_binder()).instantiate(tcx, assoc_ty_args);
+ ty::Binder::bind_with_vars(bound, bound_vars).to_predicate(tcx)
+ };
let normalized_bound = normalize_with_depth_to(
self,
obligation.param_env,
@@ -650,22 +633,20 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
(unnormalized_upcast_trait_ref, ty::Binder::dummy(object_trait_ref)),
);
- Ok(ImplSourceObjectData { vtable_base, nested })
+ Ok(ImplSource::Builtin(BuiltinImplSource::Object { vtable_base }, nested))
}
fn confirm_fn_pointer_candidate(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
- is_const: bool,
+ // FIXME(effects)
+ _is_const: bool,
) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
debug!(?obligation, "confirm_fn_pointer_candidate");
let tcx = self.tcx();
- let Some(self_ty) = self
- .infcx
- .shallow_resolve(obligation.self_ty().no_bound_vars()) else
- {
+ let Some(self_ty) = self.infcx.shallow_resolve(obligation.self_ty().no_bound_vars()) else {
// FIXME: Ideally we'd support `for<'a> fn(&'a ()): Fn(&'a ())`,
// but we do not currently. Luckily, such a bound is not
// particularly useful, so we don't expect users to write
@@ -686,16 +667,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let mut nested = self.confirm_poly_trait_refs(obligation, trait_ref)?;
let cause = obligation.derived_cause(BuiltinDerivedObligation);
- if obligation.is_const() && !is_const {
- // function is a trait method
- if let ty::FnDef(def_id, substs) = self_ty.kind() && let Some(trait_id) = tcx.trait_of_item(*def_id) {
- let trait_ref = TraitRef::from_method(tcx, trait_id, *substs);
- let poly_trait_pred = Binder::dummy(trait_ref).with_constness(ty::BoundConstness::ConstIfConst);
- let obligation = Obligation::new(tcx, cause.clone(), obligation.param_env, poly_trait_pred);
- nested.push(obligation);
- }
- }
-
// Confirm the `type Output: Sized;` bound that is present on `FnOnce`
let output_ty = self.infcx.instantiate_binder_with_placeholders(sig.output());
let output_ty = normalize_with_depth_to(
@@ -721,14 +692,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let predicate = self.infcx.instantiate_binder_with_placeholders(obligation.predicate);
let trait_ref = predicate.trait_ref;
let trait_def_id = trait_ref.def_id;
- let substs = trait_ref.substs;
+ let args = trait_ref.args;
let trait_obligations = self.impl_or_trait_obligations(
&obligation.cause,
obligation.recursion_depth,
obligation.param_env,
trait_def_id,
- &substs,
+ &args,
obligation.predicate,
);
@@ -741,17 +712,17 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
&mut self,
obligation: &PolyTraitObligation<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
- // Okay to skip binder because the substs on generator types never
+ // Okay to skip binder because the args on generator types never
// touch bound regions, they just capture the in-scope
// type/region parameters.
let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
- let ty::Generator(generator_def_id, substs, _) = *self_ty.kind() else {
+ let ty::Generator(generator_def_id, args, _) = *self_ty.kind() else {
bug!("closure candidate for non-closure {:?}", obligation);
};
- debug!(?obligation, ?generator_def_id, ?substs, "confirm_generator_candidate");
+ debug!(?obligation, ?generator_def_id, ?args, "confirm_generator_candidate");
- let gen_sig = substs.as_generator().poly_sig();
+ let gen_sig = args.as_generator().poly_sig();
// NOTE: The self-type is a generator type and hence is
// in fact unparameterized (or at least does not reference any
@@ -780,17 +751,17 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
&mut self,
obligation: &PolyTraitObligation<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
- // Okay to skip binder because the substs on generator types never
+ // Okay to skip binder because the args on generator types never
// touch bound regions, they just capture the in-scope
// type/region parameters.
let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
- let ty::Generator(generator_def_id, substs, _) = *self_ty.kind() else {
+ let ty::Generator(generator_def_id, args, _) = *self_ty.kind() else {
bug!("closure candidate for non-closure {:?}", obligation);
};
- debug!(?obligation, ?generator_def_id, ?substs, "confirm_future_candidate");
+ debug!(?obligation, ?generator_def_id, ?args, "confirm_future_candidate");
- let gen_sig = substs.as_generator().poly_sig();
+ let gen_sig = args.as_generator().poly_sig();
let trait_ref = super::util::future_trait_ref_and_outputs(
self.tcx(),
@@ -816,22 +787,22 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.fn_trait_kind_from_def_id(obligation.predicate.def_id())
.unwrap_or_else(|| bug!("closure candidate for non-fn trait {:?}", obligation));
- // Okay to skip binder because the substs on closure types never
+ // Okay to skip binder because the args on closure types never
// touch bound regions, they just capture the in-scope
// type/region parameters.
let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
- let ty::Closure(closure_def_id, substs) = *self_ty.kind() else {
+ let ty::Closure(closure_def_id, args) = *self_ty.kind() else {
bug!("closure candidate for non-closure {:?}", obligation);
};
- let trait_ref = self.closure_trait_ref_unnormalized(obligation, substs);
+ let trait_ref = self.closure_trait_ref_unnormalized(obligation, args);
let mut nested = self.confirm_poly_trait_refs(obligation, trait_ref)?;
debug!(?closure_def_id, ?trait_ref, ?nested, "confirm closure candidate obligations");
nested.push(obligation.with(
self.tcx(),
- ty::Binder::dummy(ty::PredicateKind::ClosureKind(closure_def_id, substs, kind)),
+ ty::Binder::dummy(ty::PredicateKind::ClosureKind(closure_def_id, args, kind)),
));
Ok(nested)
@@ -902,73 +873,32 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
&mut self,
obligation: &PolyTraitObligation<'tcx>,
idx: usize,
- ) -> Result<ImplSourceTraitUpcastingData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ ) -> Result<ImplSource<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
let tcx = self.tcx();
// `assemble_candidates_for_unsizing` should ensure there are no late-bound
// regions here. See the comment there for more details.
- let source = self.infcx.shallow_resolve(obligation.self_ty().no_bound_vars().unwrap());
- let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
- let target = self.infcx.shallow_resolve(target);
-
- debug!(?source, ?target, "confirm_trait_upcasting_unsize_candidate");
-
- let mut nested = vec![];
- let source_trait_ref;
- let upcast_trait_ref;
- match (source.kind(), target.kind()) {
- // TraitA+Kx+'a -> TraitB+Ky+'b (trait upcasting coercion).
- (
- &ty::Dynamic(ref data_a, r_a, repr_a @ ty::Dyn),
- &ty::Dynamic(ref data_b, r_b, ty::Dyn),
- ) => {
- // See `assemble_candidates_for_unsizing` for more info.
- // We already checked the compatibility of auto traits within `assemble_candidates_for_unsizing`.
- let principal_a = data_a.principal().unwrap();
- source_trait_ref = principal_a.with_self_ty(tcx, source);
- upcast_trait_ref = util::supertraits(tcx, source_trait_ref).nth(idx).unwrap();
- assert_eq!(data_b.principal_def_id(), Some(upcast_trait_ref.def_id()));
- let existential_predicate = upcast_trait_ref.map_bound(|trait_ref| {
- ty::ExistentialPredicate::Trait(ty::ExistentialTraitRef::erase_self_ty(
- tcx, trait_ref,
- ))
- });
- let iter = Some(existential_predicate)
- .into_iter()
- .chain(
- data_a
- .projection_bounds()
- .map(|b| b.map_bound(ty::ExistentialPredicate::Projection)),
- )
- .chain(
- data_b
- .auto_traits()
- .map(ty::ExistentialPredicate::AutoTrait)
- .map(ty::Binder::dummy),
- );
- let existential_predicates = tcx.mk_poly_existential_predicates_from_iter(iter);
- let source_trait = Ty::new_dynamic(tcx, existential_predicates, r_b, repr_a);
-
- // Require that the traits involved in this upcast are **equal**;
- // only the **lifetime bound** is changed.
- let InferOk { obligations, .. } = self
- .infcx
- .at(&obligation.cause, obligation.param_env)
- .sup(DefineOpaqueTypes::No, target, source_trait)
- .map_err(|_| Unimplemented)?;
- nested.extend(obligations);
-
- let outlives = ty::OutlivesPredicate(r_a, r_b);
- nested.push(Obligation::with_depth(
- tcx,
- obligation.cause.clone(),
- obligation.recursion_depth + 1,
- obligation.param_env,
- obligation.predicate.rebind(outlives),
- ));
- }
- _ => bug!(),
- };
+ let predicate = obligation.predicate.no_bound_vars().unwrap();
+ let a_ty = self.infcx.shallow_resolve(predicate.self_ty());
+ let b_ty = self.infcx.shallow_resolve(predicate.trait_ref.args.type_at(1));
+
+ let ty::Dynamic(a_data, a_region, ty::Dyn) = *a_ty.kind() else { bug!() };
+ let ty::Dynamic(b_data, b_region, ty::Dyn) = *b_ty.kind() else { bug!() };
+
+ let source_principal = a_data.principal().unwrap().with_self_ty(tcx, a_ty);
+ let unnormalized_upcast_principal =
+ util::supertraits(tcx, source_principal).nth(idx).unwrap();
+
+ let nested = self
+ .match_upcast_principal(
+ obligation,
+ unnormalized_upcast_principal,
+ a_data,
+ b_data,
+ a_region,
+ b_region,
+ )?
+ .expect("did not expect ambiguity during confirmation");
let vtable_segment_callback = {
let mut vptr_offset = 0;
@@ -979,7 +909,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
VtblSegment::TraitOwnEntries { trait_ref, emit_vptr } => {
vptr_offset += count_own_vtable_entries(tcx, trait_ref);
- if trait_ref == upcast_trait_ref {
+ if trait_ref == unnormalized_upcast_principal {
if emit_vptr {
return ControlFlow::Break(Some(vptr_offset));
} else {
@@ -997,27 +927,25 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
};
let vtable_vptr_slot =
- prepare_vtable_segments(tcx, source_trait_ref, vtable_segment_callback).unwrap();
+ prepare_vtable_segments(tcx, source_principal, vtable_segment_callback).unwrap();
- Ok(ImplSourceTraitUpcastingData { vtable_vptr_slot, nested })
+ Ok(ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { vtable_vptr_slot }, nested))
}
fn confirm_builtin_unsize_candidate(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
- ) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ ) -> Result<ImplSource<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
let tcx = self.tcx();
// `assemble_candidates_for_unsizing` should ensure there are no late-bound
// regions here. See the comment there for more details.
let source = self.infcx.shallow_resolve(obligation.self_ty().no_bound_vars().unwrap());
- let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
+ let target = obligation.predicate.skip_binder().trait_ref.args.type_at(1);
let target = self.infcx.shallow_resolve(target);
-
debug!(?source, ?target, "confirm_builtin_unsize_candidate");
- let mut nested = vec![];
- match (source.kind(), target.kind()) {
+ Ok(match (source.kind(), target.kind()) {
// Trait+Kx+'a -> Trait+Ky+'b (auto traits and lifetime subtyping).
(&ty::Dynamic(ref data_a, r_a, dyn_a), &ty::Dynamic(ref data_b, r_b, dyn_b))
if dyn_a == dyn_b =>
@@ -1044,22 +972,23 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// Require that the traits involved in this upcast are **equal**;
// only the **lifetime bound** is changed.
- let InferOk { obligations, .. } = self
+ let InferOk { mut obligations, .. } = self
.infcx
.at(&obligation.cause, obligation.param_env)
.sup(DefineOpaqueTypes::No, target, source_trait)
.map_err(|_| Unimplemented)?;
- nested.extend(obligations);
// Register one obligation for 'a: 'b.
let outlives = ty::OutlivesPredicate(r_a, r_b);
- nested.push(Obligation::with_depth(
+ obligations.push(Obligation::with_depth(
tcx,
obligation.cause.clone(),
obligation.recursion_depth + 1,
obligation.param_env,
obligation.predicate.rebind(outlives),
));
+
+ ImplSource::Builtin(BuiltinImplSource::Misc, obligations)
}
// `T` -> `Trait`
@@ -1085,11 +1014,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// words, if the object type is `Foo + Send`, this would create an obligation for
// the `Send` check.)
// - Projection predicates
- nested.extend(
- data.iter().map(|predicate| {
- predicate_to_obligation(predicate.with_self_ty(tcx, source))
- }),
- );
+ let mut nested: Vec<_> = data
+ .iter()
+ .map(|predicate| predicate_to_obligation(predicate.with_self_ty(tcx, source)))
+ .collect();
// We can only make objects from sized types.
let tr = ty::TraitRef::from_lang_item(
@@ -1106,6 +1034,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
nested.push(predicate_to_obligation(
ty::Binder::dummy(ty::ClauseKind::TypeOutlives(outlives)).to_predicate(tcx),
));
+
+ ImplSource::Builtin(BuiltinImplSource::Misc, nested)
}
// `[T; n]` -> `[T]`
@@ -1115,11 +1045,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.at(&obligation.cause, obligation.param_env)
.eq(DefineOpaqueTypes::No, b, a)
.map_err(|_| Unimplemented)?;
- nested.extend(obligations);
+
+ ImplSource::Builtin(BuiltinImplSource::Misc, obligations)
}
// `Struct<T>` -> `Struct<U>`
- (&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => {
+ (&ty::Adt(def, args_a), &ty::Adt(_, args_b)) => {
let unsizing_params = tcx.unsizing_params_for_adt(def.did());
if unsizing_params.is_empty() {
return Err(Unimplemented);
@@ -1128,6 +1059,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let tail_field = def.non_enum_variant().tail();
let tail_field_ty = tcx.type_of(tail_field.did);
+ let mut nested = vec![];
+
// Extract `TailField<T>` and `TailField<U>` from `Struct<T>` and `Struct<U>`,
// normalizing in the process, since `type_of` returns something directly from
// astconv (which means it's un-normalized).
@@ -1136,7 +1069,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
- tail_field_ty.subst(tcx, substs_a),
+ tail_field_ty.instantiate(tcx, args_a),
&mut nested,
);
let target_tail = normalize_with_depth_to(
@@ -1144,16 +1077,17 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
- tail_field_ty.subst(tcx, substs_b),
+ tail_field_ty.instantiate(tcx, args_b),
&mut nested,
);
// Check that the source struct with the target's
// unsizing parameters is equal to the target.
- let substs = tcx.mk_substs_from_iter(substs_a.iter().enumerate().map(|(i, k)| {
- if unsizing_params.contains(i as u32) { substs_b[i] } else { k }
- }));
- let new_struct = Ty::new_adt(tcx, def, substs);
+ let args =
+ tcx.mk_args_from_iter(args_a.iter().enumerate().map(|(i, k)| {
+ if unsizing_params.contains(i as u32) { args_b[i] } else { k }
+ }));
+ let new_struct = Ty::new_adt(tcx, def, args);
let InferOk { obligations, .. } = self
.infcx
.at(&obligation.cause, obligation.param_env)
@@ -1171,6 +1105,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
),
);
nested.push(tail_unsize_obligation);
+
+ ImplSource::Builtin(BuiltinImplSource::Misc, nested)
}
// `(.., T)` -> `(.., U)`
@@ -1185,25 +1121,24 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// last element is equal to the target.
let new_tuple =
Ty::new_tup_from_iter(tcx, a_mid.iter().copied().chain(iter::once(b_last)));
- let InferOk { obligations, .. } = self
+ let InferOk { mut obligations, .. } = self
.infcx
.at(&obligation.cause, obligation.param_env)
.eq(DefineOpaqueTypes::No, target, new_tuple)
.map_err(|_| Unimplemented)?;
- nested.extend(obligations);
// Add a nested `T: Unsize<U>` predicate.
let last_unsize_obligation = obligation.with(
tcx,
ty::TraitRef::new(tcx, obligation.predicate.def_id(), [a_last, b_last]),
);
- nested.push(last_unsize_obligation);
+ obligations.push(last_unsize_obligation);
+
+ ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, obligations)
}
_ => bug!("source: {source}, target: {target}"),
- };
-
- Ok(nested)
+ })
}
fn confirm_const_destruct_candidate(
@@ -1212,7 +1147,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
impl_def_id: Option<DefId>,
) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
// `~const Destruct` in a non-const environment is always trivially true, since our type is `Drop`
- if !obligation.is_const() {
+ // FIXME(effects)
+ if true {
return Ok(vec![]);
}
@@ -1233,8 +1169,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
trait_pred.trait_ref.def_id = drop_trait;
trait_pred
});
- let substs = self.rematch_impl(impl_def_id, &new_obligation);
- debug!(?substs, "impl substs");
+ let args = self.rematch_impl(impl_def_id, &new_obligation);
+ debug!(?args, "impl args");
let cause = obligation.derived_cause(|derived| {
ImplDerivedObligation(Box::new(ImplDerivedObligationCause {
@@ -1247,7 +1183,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let obligations = ensure_sufficient_stack(|| {
self.vtable_impl(
impl_def_id,
- substs,
+ args,
&cause,
new_obligation.recursion_depth + 1,
new_obligation.param_env,
@@ -1259,7 +1195,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// We want to confirm the ADT's fields if we have an ADT
let mut stack = match *self_ty.skip_binder().kind() {
- ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(tcx, substs)).collect(),
+ ty::Adt(def, args) => def.all_fields().map(|f| f.ty(tcx, args)).collect(),
_ => vec![self_ty.skip_binder()],
};
@@ -1292,20 +1228,20 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
ty::Tuple(tys) => {
stack.extend(tys.iter());
}
- ty::Closure(_, substs) => {
- stack.push(substs.as_closure().tupled_upvars_ty());
+ ty::Closure(_, args) => {
+ stack.push(args.as_closure().tupled_upvars_ty());
}
- ty::Generator(_, substs, _) => {
- let generator = substs.as_generator();
+ ty::Generator(_, args, _) => {
+ let generator = args.as_generator();
stack.extend([generator.tupled_upvars_ty(), generator.witness()]);
}
ty::GeneratorWitness(tys) => {
stack.extend(tcx.erase_late_bound_regions(tys).to_vec());
}
- ty::GeneratorWitnessMIR(def_id, substs) => {
+ ty::GeneratorWitnessMIR(def_id, args) => {
let tcx = self.tcx();
stack.extend(tcx.generator_hidden_types(def_id).map(|bty| {
- let ty = bty.subst(tcx, substs);
+ let ty = bty.instantiate(tcx, args);
debug_assert!(!ty.has_late_bound_regions());
ty
}))
@@ -1314,6 +1250,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// If we have a projection type, make sure to normalize it so we replace it
// with a fresh infer variable
ty::Alias(ty::Projection | ty::Inherent, ..) => {
+ // FIXME(effects) this needs constness
let predicate = normalize_with_depth_to(
self,
obligation.param_env,
@@ -1326,7 +1263,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
cause.span,
[nested_ty],
),
- constness: ty::BoundConstness::ConstIfConst,
polarity: ty::ImplPolarity::Positive,
}),
&mut nested,
@@ -1345,6 +1281,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// since it's either not `const Drop` (and we raise an error during selection),
// or it's an ADT (and we need to check for a custom impl during selection)
_ => {
+ // FIXME(effects) this needs constness
let predicate = self_ty.rebind(ty::TraitPredicate {
trait_ref: ty::TraitRef::from_lang_item(
self.tcx(),
@@ -1352,7 +1289,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
cause.span,
[nested_ty],
),
- constness: ty::BoundConstness::ConstIfConst,
polarity: ty::ImplPolarity::Positive,
});
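[Editor's note] A standalone sketch of the `Struct<T>` -> `Struct<U>` unsizing that `confirm_builtin_unsize_candidate` handles above (`Packet` is a made-up type, not part of this patch): only the tail field is unsized, and the remaining generic arguments must stay equal.

struct Packet<T: ?Sized> {
    id: u32,
    payload: T, // tail field: the only place the unsizing parameter is used
}

fn main() {
    let sized: Box<Packet<[u8; 4]>> = Box::new(Packet { id: 1, payload: [1, 2, 3, 4] });
    // `[u8; 4]` unsizes to `[u8]`, so `Packet<[u8; 4]>` unsizes to `Packet<[u8]>`.
    let erased: Box<Packet<[u8]>> = sized;
    assert_eq!(erased.id, 1);
    assert_eq!(erased.payload.len(), 4);
}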
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
index 7f31ab751..19385e2d7 100644
--- a/compiler/rustc_trait_selection/src/traits/select/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -40,7 +40,7 @@ use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::abstract_const::NotConstEvaluatable;
use rustc_middle::ty::fold::BottomUpFolder;
use rustc_middle::ty::relate::TypeRelation;
-use rustc_middle::ty::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, EarlyBinder, PolyProjectionPredicate, ToPolyTraitRef, ToPredicate};
use rustc_middle::ty::{Ty, TyCtxt, TypeFoldable, TypeVisitableExt};
use rustc_span::symbol::sym;
@@ -74,22 +74,21 @@ impl IntercrateAmbiguityCause {
match self {
IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc } => {
let self_desc = if let Some(ty) = self_desc {
- format!(" for type `{}`", ty)
+ format!(" for type `{ty}`")
} else {
String::new()
};
- format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
+ format!("downstream crates may implement trait `{trait_desc}`{self_desc}")
}
IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc } => {
let self_desc = if let Some(ty) = self_desc {
- format!(" for type `{}`", ty)
+ format!(" for type `{ty}`")
} else {
String::new()
};
format!(
- "upstream crates may add a new impl of trait `{}`{} \
- in future versions",
- trait_desc, self_desc
+ "upstream crates may add a new impl of trait `{trait_desc}`{self_desc} \
+ in future versions"
)
}
IntercrateAmbiguityCause::ReservationImpl { message } => message.clone(),
@@ -119,6 +118,8 @@ pub struct SelectionContext<'cx, 'tcx> {
/// policy. In essence, canonicalized queries need their errors propagated
/// rather than immediately reported because we do not have accurate spans.
query_mode: TraitQueryMode,
+
+ treat_inductive_cycle: TreatInductiveCycleAs,
}
// A stack that walks back up the stack frame.
@@ -199,6 +200,27 @@ enum BuiltinImplConditions<'tcx> {
Ambiguous,
}
+#[derive(Copy, Clone)]
+pub enum TreatInductiveCycleAs {
+ /// This is the previous behavior, where `Recur` represents an inductive
+ /// cycle that is known not to hold. This is not forwards-compatible with
+ /// coinduction, and will be deprecated. This is the default behavior
+ /// of the old trait solver for back-compat reasons.
+ Recur,
+ /// This is the behavior of the new trait solver, where inductive cycles
+ /// are treated as ambiguous and possibly holding.
+ Ambig,
+}
+
+impl From<TreatInductiveCycleAs> for EvaluationResult {
+ fn from(treat: TreatInductiveCycleAs) -> EvaluationResult {
+ match treat {
+ TreatInductiveCycleAs::Ambig => EvaluatedToUnknown,
+ TreatInductiveCycleAs::Recur => EvaluatedToRecur,
+ }
+ }
+}
+
impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
pub fn new(infcx: &'cx InferCtxt<'tcx>) -> SelectionContext<'cx, 'tcx> {
SelectionContext {
@@ -206,9 +228,26 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
freshener: infcx.freshener(),
intercrate_ambiguity_causes: None,
query_mode: TraitQueryMode::Standard,
+ treat_inductive_cycle: TreatInductiveCycleAs::Recur,
}
}
+ // Sets the `TreatInductiveCycleAs` mode temporarily in the selection context
+ pub fn with_treat_inductive_cycle_as<T>(
+ &mut self,
+ treat_inductive_cycle: TreatInductiveCycleAs,
+ f: impl FnOnce(&mut Self) -> T,
+ ) -> T {
+ // Should be executed in a context where caching is disabled,
+ // otherwise the cache is poisoned with the temporary result.
+ assert!(self.is_intercrate());
+ let treat_inductive_cycle =
+ std::mem::replace(&mut self.treat_inductive_cycle, treat_inductive_cycle);
+ let value = f(self);
+ self.treat_inductive_cycle = treat_inductive_cycle;
+ value
+ }
+
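+ // [Editor's note] `with_treat_inductive_cycle_as` above uses a plain
+ // save/restore via `std::mem::replace`. A minimal standalone sketch of that
+ // pattern with stand-in types (`Mode` and `Context` are illustrative, not
+ // rustc types), which like the original does not restore on panic:
+ //
+ //     #[derive(Clone, Copy, Debug, PartialEq)]
+ //     enum Mode { Recur, Ambig }
+ //
+ //     struct Context { mode: Mode }
+ //
+ //     impl Context {
+ //         fn with_mode<T>(&mut self, mode: Mode, f: impl FnOnce(&mut Self) -> T) -> T {
+ //             let previous = std::mem::replace(&mut self.mode, mode);
+ //             let value = f(self);
+ //             self.mode = previous;
+ //             value
+ //         }
+ //     }
+ //
+ //     fn main() {
+ //         let mut cx = Context { mode: Mode::Recur };
+ //         let seen = cx.with_mode(Mode::Ambig, |cx| cx.mode);
+ //         assert_eq!(seen, Mode::Ambig);
+ //         assert_eq!(cx.mode, Mode::Recur);
+ //     }
+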
pub fn with_query_mode(
infcx: &'cx InferCtxt<'tcx>,
query_mode: TraitQueryMode,
@@ -720,7 +759,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
stack.update_reached_depth(stack_arg.1);
return Ok(EvaluatedToOk);
} else {
- return Ok(EvaluatedToRecur);
+ return Ok(self.treat_inductive_cycle.into());
}
}
return Ok(EvaluatedToOk);
@@ -838,13 +877,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
ProjectAndUnifyResult::FailedNormalization => Ok(EvaluatedToAmbig),
- ProjectAndUnifyResult::Recursive => Ok(EvaluatedToRecur),
+ ProjectAndUnifyResult::Recursive => Ok(self.treat_inductive_cycle.into()),
ProjectAndUnifyResult::MismatchedProjectionTypes(_) => Ok(EvaluatedToErr),
}
}
- ty::PredicateKind::ClosureKind(_, closure_substs, kind) => {
- match self.infcx.closure_kind(closure_substs) {
+ ty::PredicateKind::ClosureKind(_, closure_args, kind) => {
+ match self.infcx.closure_kind(closure_args) {
Some(closure_kind) => {
if closure_kind.extends(kind) {
Ok(EvaluatedToOk)
@@ -895,7 +934,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.infcx
.at(&obligation.cause, obligation.param_env)
.trace(c1, c2)
- .eq(DefineOpaqueTypes::No, a.substs, b.substs)
+ .eq(DefineOpaqueTypes::No, a.args, b.args)
{
return self.evaluate_predicates_recursively(
previous_stack,
@@ -1000,13 +1039,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
let stack = self.push_stack(previous_stack, &obligation);
- let mut fresh_trait_pred = stack.fresh_trait_pred;
- let mut param_env = obligation.param_env;
-
- fresh_trait_pred = fresh_trait_pred.map_bound(|mut pred| {
- pred.remap_constness(&mut param_env);
- pred
- });
+ let fresh_trait_pred = stack.fresh_trait_pred;
+ let param_env = obligation.param_env;
debug!(?fresh_trait_pred);
@@ -1157,7 +1191,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Some(EvaluatedToOk)
} else {
debug!("evaluate_stack --> recursive, inductive");
- Some(EvaluatedToRecur)
+ Some(self.treat_inductive_cycle.into())
}
} else {
None
@@ -1194,7 +1228,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// terms of `Fn` etc, but we could probably make this more
// precise still.
let unbound_input_types =
- stack.fresh_trait_pred.skip_binder().trait_ref.substs.types().any(|ty| ty.is_fresh());
+ stack.fresh_trait_pred.skip_binder().trait_ref.args.types().any(|ty| ty.is_fresh());
if unbound_input_types
&& stack.iter().skip(1).any(|prev| {
@@ -1386,8 +1420,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
(result, dep_node)
}
- /// filter_impls filters constant trait obligations and candidates that have a positive impl
- /// for a negative goal and a negative impl for a positive goal
+ /// filter_impls filters candidates that have a positive impl for a negative
+ /// goal and a negative impl for a positive goal
#[instrument(level = "debug", skip(self, candidates))]
fn filter_impls(
&mut self,
@@ -1399,42 +1433,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let mut result = Vec::with_capacity(candidates.len());
for candidate in candidates {
- // Respect const trait obligations
- if obligation.is_const() {
- match candidate {
- // const impl
- ImplCandidate(def_id) if tcx.constness(def_id) == hir::Constness::Const => {}
- // const param
- ParamCandidate(trait_pred) if trait_pred.is_const_if_const() => {}
- // const projection
- ProjectionCandidate(_, ty::BoundConstness::ConstIfConst)
- // auto trait impl
- | AutoImplCandidate
- // generator / future, this will raise error in other places
- // or ignore error with const_async_blocks feature
- | GeneratorCandidate
- | FutureCandidate
- // FnDef where the function is const
- | FnPointerCandidate { is_const: true }
- | ConstDestructCandidate(_)
- | ClosureCandidate { is_const: true } => {}
-
- FnPointerCandidate { is_const: false } => {
- if let ty::FnDef(def_id, _) = obligation.self_ty().skip_binder().kind() && tcx.trait_of_item(*def_id).is_some() {
- // Trait methods are not seen as const unless the trait is implemented as const.
- // We do not filter that out in here, but nested obligations will be needed to confirm this.
- } else {
- continue
- }
- }
-
- _ => {
- // reject all other types of candidates
- continue;
- }
- }
- }
-
if let ImplCandidate(def_id) = candidate {
if ty::ImplPolarity::Reservation == tcx.impl_polarity(def_id)
|| obligation.polarity() == tcx.impl_polarity(def_id)
@@ -1487,7 +1485,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Result<(), Conflict> {
debug!("is_knowable(intercrate={:?})", self.is_intercrate());
- if !self.is_intercrate() || stack.obligation.polarity() == ty::ImplPolarity::Negative {
+ if !self.is_intercrate() {
return Ok(());
}
@@ -1499,7 +1497,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// bound regions.
let trait_ref = predicate.skip_binder().trait_ref;
- coherence::trait_ref_is_knowable(self.tcx(), trait_ref)
+ coherence::trait_ref_is_knowable::<!>(self.tcx(), trait_ref, |ty| Ok(ty)).unwrap()
}
/// Returns `true` if the global caches can be used.
@@ -1528,7 +1526,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
fn check_candidate_cache(
&mut self,
- mut param_env: ty::ParamEnv<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
// Neither the global nor local cache is aware of intercrate
@@ -1539,8 +1537,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
return None;
}
let tcx = self.tcx();
- let mut pred = cache_fresh_trait_pred.skip_binder();
- pred.remap_constness(&mut param_env);
+ let pred = cache_fresh_trait_pred.skip_binder();
if self.can_use_global_caches(param_env) {
if let Some(res) = tcx.selection_cache.get(&(param_env, pred), tcx) {
@@ -1586,15 +1583,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
#[instrument(skip(self, param_env, cache_fresh_trait_pred, dep_node), level = "debug")]
fn insert_candidate_cache(
&mut self,
- mut param_env: ty::ParamEnv<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
dep_node: DepNodeIndex,
candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
) {
let tcx = self.tcx();
- let mut pred = cache_fresh_trait_pred.skip_binder();
-
- pred.remap_constness(&mut param_env);
+ let pred = cache_fresh_trait_pred.skip_binder();
if !self.can_cache_candidate(&candidate) {
debug!(?pred, ?candidate, "insert_candidate_cache - candidate is not cacheable");
@@ -1628,16 +1623,16 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
fn match_projection_obligation_against_definition_bounds(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
- ) -> smallvec::SmallVec<[(usize, ty::BoundConstness); 2]> {
+ ) -> smallvec::SmallVec<[usize; 2]> {
let poly_trait_predicate = self.infcx.resolve_vars_if_possible(obligation.predicate);
let placeholder_trait_predicate =
self.infcx.instantiate_binder_with_placeholders(poly_trait_predicate);
debug!(?placeholder_trait_predicate);
let tcx = self.infcx.tcx;
- let (def_id, substs) = match *placeholder_trait_predicate.trait_ref.self_ty().kind() {
- ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
- (def_id, substs)
+ let (def_id, args) = match *placeholder_trait_predicate.trait_ref.self_ty().kind() {
+ ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
+ (def_id, args)
}
_ => {
span_bug!(
@@ -1648,7 +1643,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
);
}
};
- let bounds = tcx.item_bounds(def_id).subst(tcx, substs);
+ let bounds = tcx.item_bounds(def_id).instantiate(tcx, args);
// The bounds returned by `item_bounds` may contain duplicates after
// normalization, so try to deduplicate when possible to avoid
@@ -1677,7 +1672,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
_ => false,
}
}) {
- return Some((idx, pred.constness));
+ return Some(idx);
}
}
None
@@ -1785,11 +1780,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
if is_match {
let generics = self.tcx().generics_of(obligation.predicate.def_id);
// FIXME(generic-associated-types): Addresses aggressive inference in #92917.
- // If this type is a GAT, and of the GAT substs resolve to something new,
+ // If this type is a GAT, and of the GAT args resolve to something new,
// that means that we must have newly inferred something about the GAT.
// We should give up in that case.
if !generics.params.is_empty()
- && obligation.predicate.substs[generics.parent_count..]
+ && obligation.predicate.args[generics.parent_count..]
.iter()
.any(|&p| p.has_non_region_infer() && self.infcx.shallow_resolve(p) != p)
{
@@ -1827,6 +1822,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
/// candidates and prefer where-clause candidates.
///
/// See the comment for "SelectionCandidate" for more details.
+ #[instrument(level = "debug", skip(self))]
fn candidate_should_be_dropped_in_favor_of(
&mut self,
victim: &EvaluatedCandidate<'tcx>,
@@ -1850,13 +1846,6 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
match (&other.candidate, &victim.candidate) {
- (_, AutoImplCandidate) | (AutoImplCandidate, _) => {
- bug!(
- "default implementations shouldn't be recorded \
- when there are other valid candidates"
- );
- }
-
// FIXME(@jswrenn): this should probably be more sophisticated
(TransmutabilityCandidate, _) | (_, TransmutabilityCandidate) => DropVictim::No,
@@ -1871,7 +1860,6 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
(ParamCandidate(other), ParamCandidate(victim)) => {
let same_except_bound_vars = other.skip_binder().trait_ref
== victim.skip_binder().trait_ref
- && other.skip_binder().constness == victim.skip_binder().constness
&& other.skip_binder().polarity == victim.skip_binder().polarity
&& !other.skip_binder().trait_ref.has_escaping_bound_vars();
if same_except_bound_vars {
@@ -1881,12 +1869,6 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
// probably best characterized as a "hack", since we might prefer to just do our
// best to *not* create essentially duplicate candidates in the first place.
DropVictim::drop_if(other.bound_vars().len() <= victim.bound_vars().len())
- } else if other.skip_binder().trait_ref == victim.skip_binder().trait_ref
- && victim.skip_binder().constness == ty::BoundConstness::NotConst
- && other.skip_binder().polarity == victim.skip_binder().polarity
- {
- // Drop otherwise equivalent non-const candidates in favor of const candidates.
- DropVictim::Yes
} else {
DropVictim::No
}
@@ -1898,6 +1880,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
(
ParamCandidate(ref other_cand),
ImplCandidate(..)
+ | AutoImplCandidate
| ClosureCandidate { .. }
| GeneratorCandidate
| FutureCandidate
@@ -1925,6 +1908,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
}
(
ImplCandidate(_)
+ | AutoImplCandidate
| ClosureCandidate { .. }
| GeneratorCandidate
| FutureCandidate
@@ -1958,6 +1942,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
(
ObjectCandidate(_) | ProjectionCandidate(..),
ImplCandidate(..)
+ | AutoImplCandidate
| ClosureCandidate { .. }
| GeneratorCandidate
| FutureCandidate
@@ -1971,6 +1956,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
(
ImplCandidate(..)
+ | AutoImplCandidate
| ClosureCandidate { .. }
| GeneratorCandidate
| FutureCandidate
@@ -2061,6 +2047,19 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
}
}
+ (AutoImplCandidate, ImplCandidate(_)) | (ImplCandidate(_), AutoImplCandidate) => {
+ DropVictim::No
+ }
+
+ (AutoImplCandidate, _) | (_, AutoImplCandidate) => {
+ bug!(
+ "default implementations shouldn't be recorded \
+ when there are other global candidates: {:?} {:?}",
+ other,
+ victim
+ );
+ }
+
// Everything else is ambiguous
(
ImplCandidate(_)
@@ -2127,13 +2126,13 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
obligation.predicate.rebind(tys.last().map_or_else(Vec::new, |&last| vec![last])),
),
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
let sized_crit = def.sized_constraint(self.tcx());
// (*) binder moved here
Where(
obligation
.predicate
- .rebind(sized_crit.subst_iter_copied(self.tcx(), substs).collect()),
+ .rebind(sized_crit.iter_instantiated(self.tcx(), args).collect()),
)
}
@@ -2159,14 +2158,11 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
use self::BuiltinImplConditions::{Ambiguous, None, Where};
match *self_ty.kind() {
- ty::Infer(ty::IntVar(_))
- | ty::Infer(ty::FloatVar(_))
- | ty::FnDef(..)
- | ty::FnPtr(_)
- | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
+ ty::FnDef(..) | ty::FnPtr(_) | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
ty::Uint(_)
| ty::Int(_)
+ | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Bool
| ty::Float(_)
| ty::Char
@@ -2190,20 +2186,21 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
Where(obligation.predicate.rebind(tys.iter().collect()))
}
- ty::Generator(_, substs, hir::Movability::Movable) => {
+ ty::Generator(_, args, hir::Movability::Movable) => {
if self.tcx().features().generator_clone {
let resolved_upvars =
- self.infcx.shallow_resolve(substs.as_generator().tupled_upvars_ty());
+ self.infcx.shallow_resolve(args.as_generator().tupled_upvars_ty());
let resolved_witness =
- self.infcx.shallow_resolve(substs.as_generator().witness());
+ self.infcx.shallow_resolve(args.as_generator().witness());
if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() {
// Not yet resolved.
Ambiguous
} else {
- let all = substs
+ let all = args
.as_generator()
.upvar_tys()
- .chain(iter::once(substs.as_generator().witness()))
+ .iter()
+ .chain([args.as_generator().witness()])
.collect::<Vec<_>>();
Where(obligation.predicate.rebind(all))
}
@@ -2227,24 +2224,24 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
Where(ty::Binder::bind_with_vars(witness_tys.to_vec(), all_vars))
}
- ty::GeneratorWitnessMIR(def_id, ref substs) => {
+ ty::GeneratorWitnessMIR(def_id, ref args) => {
let hidden_types = bind_generator_hidden_types_above(
self.infcx,
def_id,
- substs,
+ args,
obligation.predicate.bound_vars(),
);
Where(hidden_types)
}
- ty::Closure(_, substs) => {
+ ty::Closure(_, args) => {
// (*) binder moved here
- let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
+ let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
if let ty::Infer(ty::TyVar(_)) = ty.kind() {
// Not yet resolved.
Ambiguous
} else {
- Where(obligation.predicate.rebind(substs.as_closure().upvar_tys().collect()))
+ Where(obligation.predicate.rebind(args.as_closure().upvar_tys().to_vec()))
}
}
@@ -2321,14 +2318,14 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
t.rebind(tys.iter().collect())
}
- ty::Closure(_, ref substs) => {
- let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
+ ty::Closure(_, ref args) => {
+ let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
t.rebind(vec![ty])
}
- ty::Generator(_, ref substs, _) => {
- let ty = self.infcx.shallow_resolve(substs.as_generator().tupled_upvars_ty());
- let witness = substs.as_generator().witness();
+ ty::Generator(_, ref args, _) => {
+ let ty = self.infcx.shallow_resolve(args.as_generator().tupled_upvars_ty());
+ let witness = args.as_generator().witness();
t.rebind([ty].into_iter().chain(iter::once(witness)).collect())
}
@@ -2337,18 +2334,18 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
types.map_bound(|types| types.to_vec())
}
- ty::GeneratorWitnessMIR(def_id, ref substs) => {
- bind_generator_hidden_types_above(self.infcx, def_id, substs, t.bound_vars())
+ ty::GeneratorWitnessMIR(def_id, ref args) => {
+ bind_generator_hidden_types_above(self.infcx, def_id, args, t.bound_vars())
}
// For `PhantomData<T>`, we pass `T`.
- ty::Adt(def, substs) if def.is_phantom_data() => t.rebind(substs.types().collect()),
+ ty::Adt(def, args) if def.is_phantom_data() => t.rebind(args.types().collect()),
- ty::Adt(def, substs) => {
- t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect())
+ ty::Adt(def, args) => {
+ t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), args)).collect())
}
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
let ty = self.tcx().type_of(def_id);
if ty.skip_binder().references_error() {
return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
@@ -2356,7 +2353,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
- t.rebind(vec![ty.subst(self.tcx(), substs)])
+ t.rebind(vec![ty.instantiate(self.tcx(), args)])
}
})
}
@@ -2428,13 +2425,13 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
&mut self,
impl_def_id: DefId,
obligation: &PolyTraitObligation<'tcx>,
- ) -> Normalized<'tcx, SubstsRef<'tcx>> {
+ ) -> Normalized<'tcx, GenericArgsRef<'tcx>> {
let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
match self.match_impl(impl_def_id, impl_trait_ref, obligation) {
- Ok(substs) => substs,
+ Ok(args) => args,
Err(()) => {
// FIXME: A rematch may fail when a candidate cache hit occurs
- // on thefreshened form of the trait predicate, but the match
+ // on the freshened form of the trait predicate, but the match
// fails for some reason that is not captured in the freshened
// cache key. For example, equating an impl trait ref against
// the placeholder trait ref may fail due the Generalizer relation
@@ -2443,11 +2440,10 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
let guar = self.infcx.tcx.sess.delay_span_bug(
obligation.cause.span,
format!(
- "Impl {:?} was matchable against {:?} but now is not",
- impl_def_id, obligation
+ "Impl {impl_def_id:?} was matchable against {obligation:?} but now is not"
),
);
- let value = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id);
+ let value = self.infcx.fresh_args_for_item(obligation.cause.span, impl_def_id);
let err = Ty::new_error(self.tcx(), guar);
let value = value.fold_with(&mut BottomUpFolder {
tcx: self.tcx(),
@@ -2466,14 +2462,14 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
impl_def_id: DefId,
impl_trait_ref: EarlyBinder<ty::TraitRef<'tcx>>,
obligation: &PolyTraitObligation<'tcx>,
- ) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> {
+ ) -> Result<Normalized<'tcx, GenericArgsRef<'tcx>>, ()> {
let placeholder_obligation =
self.infcx.instantiate_binder_with_placeholders(obligation.predicate);
let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;
- let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id);
+ let impl_args = self.infcx.fresh_args_for_item(obligation.cause.span, impl_def_id);
- let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs);
+ let impl_trait_ref = impl_trait_ref.instantiate(self.tcx(), impl_args);
if impl_trait_ref.references_error() {
return Err(());
}
@@ -2515,7 +2511,99 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
return Err(());
}
- Ok(Normalized { value: impl_substs, obligations: nested_obligations })
+ Ok(Normalized { value: impl_args, obligations: nested_obligations })
+ }
+
+ fn match_upcast_principal(
+ &mut self,
+ obligation: &PolyTraitObligation<'tcx>,
+ unnormalized_upcast_principal: ty::PolyTraitRef<'tcx>,
+ a_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
+ b_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
+ a_region: ty::Region<'tcx>,
+ b_region: ty::Region<'tcx>,
+ ) -> SelectionResult<'tcx, Vec<PredicateObligation<'tcx>>> {
+ let tcx = self.tcx();
+ let mut nested = vec![];
+
+ let upcast_principal = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ unnormalized_upcast_principal,
+ &mut nested,
+ );
+
+ for bound in b_data {
+ match bound.skip_binder() {
+ // Check that a_ty's supertrait (upcast_principal) is compatible
+ // with the target (b_ty).
+ ty::ExistentialPredicate::Trait(target_principal) => {
+ nested.extend(
+ self.infcx
+ .at(&obligation.cause, obligation.param_env)
+ .sup(
+ DefineOpaqueTypes::No,
+ upcast_principal.map_bound(|trait_ref| {
+ ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
+ }),
+ bound.rebind(target_principal),
+ )
+ .map_err(|_| SelectionError::Unimplemented)?
+ .into_obligations(),
+ );
+ }
+ // Check that b_ty's projection is satisfied by exactly one of
+ // a_ty's projections. First, we look through the list to see if
+ // any match. If not, error. Then, if *more* than one matches, we
+ // return ambiguity. Otherwise, if exactly one matches, equate
+ // it with b_ty's projection.
+ ty::ExistentialPredicate::Projection(target_projection) => {
+ let target_projection = bound.rebind(target_projection);
+ let mut matching_projections =
+ a_data.projection_bounds().filter(|source_projection| {
+ // Eager normalization means that we can just use can_eq
+ // here instead of equating and processing obligations.
+ source_projection.item_def_id() == target_projection.item_def_id()
+ && self.infcx.can_eq(
+ obligation.param_env,
+ *source_projection,
+ target_projection,
+ )
+ });
+ let Some(source_projection) = matching_projections.next() else {
+ return Err(SelectionError::Unimplemented);
+ };
+ if matching_projections.next().is_some() {
+ return Ok(None);
+ }
+ nested.extend(
+ self.infcx
+ .at(&obligation.cause, obligation.param_env)
+ .sup(DefineOpaqueTypes::No, source_projection, target_projection)
+ .map_err(|_| SelectionError::Unimplemented)?
+ .into_obligations(),
+ );
+ }
+ // Check that b_ty's auto traits are present in a_ty's bounds.
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ if !a_data.auto_traits().any(|source_def_id| source_def_id == def_id) {
+ return Err(SelectionError::Unimplemented);
+ }
+ }
+ }
+ }
+
+ nested.push(Obligation::with_depth(
+ tcx,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ ty::Binder::dummy(ty::OutlivesPredicate(a_region, b_region)),
+ ));
+
+ Ok(Some(nested))
}
/// Normalize `where_clause_trait_ref` and try to match it against
@@ -2580,9 +2668,9 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
fn closure_trait_ref_unnormalized(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> ty::PolyTraitRef<'tcx> {
- let closure_sig = substs.as_closure().sig();
+ let closure_sig = args.as_closure().sig();
debug!(?closure_sig);
@@ -2615,8 +2703,8 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
cause: &ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
- def_id: DefId, // of impl or trait
- substs: SubstsRef<'tcx>, // for impl or trait
+ def_id: DefId, // of impl or trait
+ args: GenericArgsRef<'tcx>, // for impl or trait
parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
) -> Vec<PredicateObligation<'tcx>> {
let tcx = self.tcx();
@@ -2637,7 +2725,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
// that order.
let predicates = tcx.predicates_of(def_id);
assert_eq!(predicates.parent, None);
- let predicates = predicates.instantiate_own(tcx, substs);
+ let predicates = predicates.instantiate_own(tcx, args);
let mut obligations = Vec::with_capacity(predicates.len());
for (index, (predicate, span)) in predicates.into_iter().enumerate() {
let cause =
@@ -2990,7 +3078,7 @@ pub enum ProjectionMatchesProjection {
fn bind_generator_hidden_types_above<'tcx>(
infcx: &InferCtxt<'tcx>,
def_id: DefId,
- substs: ty::SubstsRef<'tcx>,
+ args: ty::GenericArgsRef<'tcx>,
bound_vars: &ty::List<ty::BoundVariableKind>,
) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
let tcx = infcx.tcx;
@@ -3006,7 +3094,7 @@ fn bind_generator_hidden_types_above<'tcx>(
// Deduplicate tys to avoid repeated work.
.filter(|bty| seen_tys.insert(*bty))
.map(|bty| {
- let mut ty = bty.subst(tcx, substs);
+ let mut ty = bty.instantiate(tcx, args);
// Only remap erased regions if we use them.
if considering_regions {
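Context for the new `match_upcast_principal` helper above: it justifies dyn-to-dyn upcasts by checking that the target principal is a supertrait of the source, that each target projection bound matches exactly one source projection, and that every target auto trait already appears in the source. A minimal surface-level sketch of such an upcast (illustrative only; `trait_upcasting` was still unstable when this change landed, so this needs a nightly toolchain):

#![feature(trait_upcasting)]

trait Super {
    type Out;
}

trait Sub: Super {}

struct Impl;

impl Super for Impl {
    type Out = u32;
}

impl Sub for Impl {}

// Accepted because `Super<Out = u32>` is a supertrait of the source principal,
// the `Out = u32` projection matches, and `Send` is present on both sides.
fn upcast(x: &(dyn Sub<Out = u32> + Send)) -> &(dyn Super<Out = u32> + Send) {
    x
}

fn main() {
    let value = Impl;
    let sub: &(dyn Sub<Out = u32> + Send) = &value;
    let _sup = upcast(sub);
}
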
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
index 96f128741..729cf2f33 100644
--- a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
@@ -23,7 +23,7 @@ use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::{error_code, DelayDm, Diagnostic};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::ty::{self, ImplSubject, Ty, TyCtxt, TypeVisitableExt};
-use rustc_middle::ty::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::{GenericArgs, GenericArgsRef};
use rustc_session::lint::builtin::COHERENCE_LEAK_CHECK;
use rustc_session::lint::builtin::ORDER_DEPENDENT_TRAIT_OBJECTS;
use rustc_span::{Span, DUMMY_SP};
@@ -48,7 +48,7 @@ pub struct OverlapError<'tcx> {
/// When we have selected one impl, but are actually using item definitions from
/// a parent impl providing a default, we need a way to translate between the
/// type parameters of the two impls. Here the `source_impl` is the one we've
-/// selected, and `source_substs` is a substitution of its generics.
+/// selected, and `source_args` is a substitution of its generics.
/// And `target_node` is the impl/trait we're actually going to get the
/// definition from. The resulting substitution will map from `target_node`'s
/// generics to `source_impl`'s generics as instantiated by `source_subst`.
@@ -76,51 +76,46 @@ pub struct OverlapError<'tcx> {
/// through associated type projection. We deal with such cases by using
/// *fulfillment* to relate the two impls, requiring that all projections are
/// resolved.
-pub fn translate_substs<'tcx>(
+pub fn translate_args<'tcx>(
infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
source_impl: DefId,
- source_substs: SubstsRef<'tcx>,
+ source_args: GenericArgsRef<'tcx>,
target_node: specialization_graph::Node,
-) -> SubstsRef<'tcx> {
- translate_substs_with_cause(
- infcx,
- param_env,
- source_impl,
- source_substs,
- target_node,
- |_, _| ObligationCause::dummy(),
- )
+) -> GenericArgsRef<'tcx> {
+ translate_args_with_cause(infcx, param_env, source_impl, source_args, target_node, |_, _| {
+ ObligationCause::dummy()
+ })
}
-/// Like [translate_substs], but obligations from the parent implementation
+/// Like [translate_args], but obligations from the parent implementation
/// are registered with the provided `ObligationCause`.
///
/// This is for reporting *region* errors from those bounds. Type errors should
/// not happen because the specialization graph already checks for those, and
/// will result in an ICE.
-pub fn translate_substs_with_cause<'tcx>(
+pub fn translate_args_with_cause<'tcx>(
infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
source_impl: DefId,
- source_substs: SubstsRef<'tcx>,
+ source_args: GenericArgsRef<'tcx>,
target_node: specialization_graph::Node,
cause: impl Fn(usize, Span) -> ObligationCause<'tcx>,
-) -> SubstsRef<'tcx> {
+) -> GenericArgsRef<'tcx> {
debug!(
- "translate_substs({:?}, {:?}, {:?}, {:?})",
- param_env, source_impl, source_substs, target_node
+ "translate_args({:?}, {:?}, {:?}, {:?})",
+ param_env, source_impl, source_args, target_node
);
let source_trait_ref =
- infcx.tcx.impl_trait_ref(source_impl).unwrap().subst(infcx.tcx, &source_substs);
+ infcx.tcx.impl_trait_ref(source_impl).unwrap().instantiate(infcx.tcx, &source_args);
// translate the Self and Param parts of the substitution, since those
// vary across impls
- let target_substs = match target_node {
+ let target_args = match target_node {
specialization_graph::Node::Impl(target_impl) => {
// no need to translate if we're targeting the impl we started with
if source_impl == target_impl {
- return source_substs;
+ return source_args;
}
fulfill_implication(infcx, param_env, source_trait_ref, source_impl, target_impl, cause)
@@ -131,11 +126,11 @@ pub fn translate_substs_with_cause<'tcx>(
)
})
}
- specialization_graph::Node::Trait(..) => source_trait_ref.substs,
+ specialization_graph::Node::Trait(..) => source_trait_ref.args,
};
// directly inherent the method generics, since those do not vary across impls
- source_substs.rebase_onto(infcx.tcx, source_impl, target_substs)
+ source_args.rebase_onto(infcx.tcx, source_impl, target_args)
}
/// Is `impl1` a specialization of `impl2`?
@@ -172,7 +167,7 @@ pub(super) fn specializes(tcx: TyCtxt<'_>, (impl1_def_id, impl2_def_id): (DefId,
// create a parameter environment corresponding to a (placeholder) instantiation of impl1
let penv = tcx.param_env(impl1_def_id);
- let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id).unwrap().subst_identity();
+ let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id).unwrap().instantiate_identity();
// Create an infcx, taking the predicates of impl1 as assumptions:
let infcx = tcx.infer_ctxt().build();
@@ -196,7 +191,7 @@ fn fulfill_implication<'tcx>(
source_impl: DefId,
target_impl: DefId,
error_cause: impl Fn(usize, Span) -> ObligationCause<'tcx>,
-) -> Result<SubstsRef<'tcx>, ()> {
+) -> Result<GenericArgsRef<'tcx>, ()> {
debug!(
"fulfill_implication({:?}, trait_ref={:?} |- {:?} applies)",
param_env, source_trait_ref, target_impl
@@ -221,18 +216,16 @@ fn fulfill_implication<'tcx>(
let source_trait = ImplSubject::Trait(source_trait_ref);
let selcx = &mut SelectionContext::new(&infcx);
- let target_substs = infcx.fresh_substs_for_item(DUMMY_SP, target_impl);
+ let target_args = infcx.fresh_args_for_item(DUMMY_SP, target_impl);
let (target_trait, obligations) =
- util::impl_subject_and_oblig(selcx, param_env, target_impl, target_substs, error_cause);
+ util::impl_subject_and_oblig(selcx, param_env, target_impl, target_args, error_cause);
// do the impls unify? If not, no specialization.
- let Ok(InferOk { obligations: more_obligations, .. }) =
- infcx.at(&ObligationCause::dummy(), param_env).eq(DefineOpaqueTypes::No, source_trait, target_trait)
+ let Ok(InferOk { obligations: more_obligations, .. }) = infcx
+ .at(&ObligationCause::dummy(), param_env)
+ .eq(DefineOpaqueTypes::No, source_trait, target_trait)
else {
- debug!(
- "fulfill_implication: {:?} does not unify with {:?}",
- source_trait, target_trait
- );
+ debug!("fulfill_implication: {:?} does not unify with {:?}", source_trait, target_trait);
return Err(());
};
@@ -261,7 +254,7 @@ fn fulfill_implication<'tcx>(
// Now resolve the *substitution* we built for the target earlier, replacing
// the inference variables inside with whatever we got from fulfillment.
- Ok(infcx.resolve_vars_if_possible(target_substs))
+ Ok(infcx.resolve_vars_if_possible(target_args))
}
/// Query provider for `specialization_graph_of`.
@@ -395,16 +388,16 @@ fn report_conflicting_impls<'tcx>(
impl_span,
format!(
"conflicting implementation{}",
- overlap.self_ty.map_or_else(String::new, |ty| format!(" for `{}`", ty))
+ overlap.self_ty.map_or_else(String::new, |ty| format!(" for `{ty}`"))
),
);
}
Err(cname) => {
let msg = match to_pretty_impl_header(tcx, overlap.with_impl) {
Some(s) => {
- format!("conflicting implementation in crate `{}`:\n- {}", cname, s)
+ format!("conflicting implementation in crate `{cname}`:\n- {s}")
}
- None => format!("conflicting implementation in crate `{}`", cname),
+ None => format!("conflicting implementation in crate `{cname}`"),
};
err.note(msg);
}
@@ -469,21 +462,21 @@ fn report_conflicting_impls<'tcx>(
pub(crate) fn to_pretty_impl_header(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Option<String> {
use std::fmt::Write;
- let trait_ref = tcx.impl_trait_ref(impl_def_id)?.subst_identity();
+ let trait_ref = tcx.impl_trait_ref(impl_def_id)?.instantiate_identity();
let mut w = "impl".to_owned();
- let substs = InternalSubsts::identity_for_item(tcx, impl_def_id);
+ let args = GenericArgs::identity_for_item(tcx, impl_def_id);
// FIXME: Currently only handles ?Sized.
// Needs to support ?Move and ?DynSized when they are implemented.
let mut types_without_default_bounds = FxIndexSet::default();
let sized_trait = tcx.lang_items().sized_trait();
- if !substs.is_empty() {
- types_without_default_bounds.extend(substs.types());
+ if !args.is_empty() {
+ types_without_default_bounds.extend(args.types());
w.push('<');
w.push_str(
- &substs
+ &args
.iter()
.map(|k| k.to_string())
.filter(|k| k != "'_")
@@ -497,7 +490,7 @@ pub(crate) fn to_pretty_impl_header(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Opti
w,
" {} for {}",
trait_ref.print_only_trait_path(),
- tcx.type_of(impl_def_id).subst_identity()
+ tcx.type_of(impl_def_id).instantiate_identity()
)
.unwrap();
@@ -507,22 +500,17 @@ pub(crate) fn to_pretty_impl_header(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Opti
let mut pretty_predicates =
Vec::with_capacity(predicates.len() + types_without_default_bounds.len());
- for (mut p, _) in predicates {
+ for (p, _) in predicates {
if let Some(poly_trait_ref) = p.as_trait_clause() {
if Some(poly_trait_ref.def_id()) == sized_trait {
types_without_default_bounds.remove(&poly_trait_ref.self_ty().skip_binder());
continue;
}
-
- if ty::BoundConstness::ConstIfConst == poly_trait_ref.skip_binder().constness {
- p = p.without_const(tcx);
- }
}
pretty_predicates.push(p.to_string());
}
- pretty_predicates
- .extend(types_without_default_bounds.iter().map(|ty| format!("{}: ?Sized", ty)));
+ pretty_predicates.extend(types_without_default_bounds.iter().map(|ty| format!("{ty}: ?Sized")));
if !pretty_predicates.is_empty() {
write!(w, "\n where {}", pretty_predicates.join(", ")).unwrap();
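The `translate_args` machinery above comes into play when selection picks a specializing impl but an item definition is still supplied by the parent impl via `default fn`. A minimal sketch of that situation (illustrative only; requires a nightly toolchain, and `specialization` is an incomplete feature):

#![allow(incomplete_features)]
#![feature(specialization)]

trait Describe {
    fn describe(&self) -> String;
    fn tag(&self) -> &'static str;
}

impl<T> Describe for Vec<T> {
    default fn describe(&self) -> String {
        format!("a vector with {} elements", self.len())
    }
    default fn tag(&self) -> &'static str {
        "generic"
    }
}

impl Describe for Vec<u8> {
    // Only `tag` is overridden; `describe` is inherited from the parent impl,
    // which is exactly when the parent's generics have to be re-expressed in
    // terms of the selected impl's generic arguments.
    fn tag(&self) -> &'static str {
        "bytes"
    }
}

fn main() {
    let v: Vec<u8> = vec![1, 2, 3];
    println!("{} ({})", v.describe(), v.tag());
}
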
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs
index aa5c624f4..e9a592bde 100644
--- a/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs
+++ b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs
@@ -180,7 +180,7 @@ impl<'tcx> ChildrenExt<'tcx> for Children {
if le && !ge {
debug!(
"descending as child of TraitRef {:?}",
- tcx.impl_trait_ref(possible_sibling).unwrap().subst_identity()
+ tcx.impl_trait_ref(possible_sibling).unwrap().instantiate_identity()
);
// The impl specializes `possible_sibling`.
@@ -188,7 +188,7 @@ impl<'tcx> ChildrenExt<'tcx> for Children {
} else if ge && !le {
debug!(
"placing as parent of TraitRef {:?}",
- tcx.impl_trait_ref(possible_sibling).unwrap().subst_identity()
+ tcx.impl_trait_ref(possible_sibling).unwrap().instantiate_identity()
);
replace_children.push(possible_sibling);
diff --git a/compiler/rustc_trait_selection/src/traits/structural_match.rs b/compiler/rustc_trait_selection/src/traits/structural_match.rs
index 420f8c5dc..0864e4dc8 100644
--- a/compiler/rustc_trait_selection/src/traits/structural_match.rs
+++ b/compiler/rustc_trait_selection/src/traits/structural_match.rs
@@ -62,8 +62,8 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for Search<'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
debug!("Search visiting ty: {:?}", ty);
- let (adt_def, substs) = match *ty.kind() {
- ty::Adt(adt_def, substs) => (adt_def, substs),
+ let (adt_def, args) = match *ty.kind() {
+ ty::Adt(adt_def, args) => (adt_def, args),
ty::Param(_) => {
return ControlFlow::Break(ty);
}
@@ -157,15 +157,15 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for Search<'tcx> {
// instead looks directly at its fields outside
// this match), so we skip super_visit_with.
//
- // (Must not recur on substs for `PhantomData<T>` cf
+ // (Must not recur on args for `PhantomData<T>` cf
// rust-lang/rust#55028 and rust-lang/rust#55837; but also
- // want to skip substs when only uses of generic are
+ // want to skip args when only uses of generic are
// behind unsafe pointers `*const T`/`*mut T`.)
// even though we skip super_visit_with, we must recur on
// fields of ADT.
let tcx = self.tcx;
- adt_def.all_fields().map(|field| field.ty(tcx, substs)).try_for_each(|field_ty| {
+ adt_def.all_fields().map(|field| field.ty(tcx, args)).try_for_each(|field_ty| {
let ty = self.tcx.normalize_erasing_regions(ty::ParamEnv::empty(), field_ty);
debug!("structural-match ADT: field_ty={:?}, ty={:?}", field_ty, ty);
ty.visit_with(self)
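What the `Search` visitor above ultimately guards is whether a type may be used in a const pattern: every field reachable through the ADT must be structurally matchable, while generic arguments hidden behind `PhantomData` or raw pointers are skipped. A small stable-Rust sketch of the user-facing property (illustrative only):

#[derive(PartialEq, Eq)]
struct Point {
    x: i32,
    y: i32,
}

const ORIGIN: Point = Point { x: 0, y: 0 };

// `ORIGIN` is usable as a pattern only because `Point` derives `PartialEq`/`Eq`
// structurally, which is what the visitor verifies field by field.
fn is_origin(p: Point) -> bool {
    matches!(p, ORIGIN)
}

fn main() {
    assert!(is_origin(Point { x: 0, y: 0 }));
    assert!(!is_origin(Point { x: 1, y: 0 }));
}
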
diff --git a/compiler/rustc_trait_selection/src/traits/structural_normalize.rs b/compiler/rustc_trait_selection/src/traits/structural_normalize.rs
index 84746eba3..d3c4dc459 100644
--- a/compiler/rustc_trait_selection/src/traits/structural_normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/structural_normalize.rs
@@ -22,7 +22,9 @@ impl<'tcx> StructurallyNormalizeExt<'tcx> for At<'_, 'tcx> {
assert!(!ty.is_ty_var(), "should have resolved vars before calling");
if self.infcx.next_trait_solver() {
- while let ty::Alias(ty::Projection, projection_ty) = *ty.kind() {
+ while let ty::Alias(ty::Projection | ty::Inherent | ty::Weak, projection_ty) =
+ *ty.kind()
+ {
let new_infer_ty = self.infcx.next_ty_var(TypeVariableOrigin {
kind: TypeVariableOriginKind::NormalizeProjectionType,
span: self.cause.span,
diff --git a/compiler/rustc_trait_selection/src/traits/util.rs b/compiler/rustc_trait_selection/src/traits/util.rs
index 302b6cacf..a76272e9d 100644
--- a/compiler/rustc_trait_selection/src/traits/util.rs
+++ b/compiler/rustc_trait_selection/src/traits/util.rs
@@ -4,7 +4,7 @@ use rustc_data_structures::fx::FxHashSet;
use rustc_errors::Diagnostic;
use rustc_hir::def_id::DefId;
use rustc_infer::infer::InferOk;
-use rustc_middle::ty::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, ImplSubject, ToPredicate, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::Span;
use smallvec::SmallVec;
@@ -50,7 +50,7 @@ impl<'tcx> TraitAliasExpansionInfo<'tcx> {
diag.span_label(self.top().1, top_label);
if self.path.len() > 1 {
for (_, sp) in self.path.iter().rev().skip(1).take(self.path.len() - 2) {
- diag.span_label(*sp, format!("referenced here ({})", use_desc));
+ diag.span_label(*sp, format!("referenced here ({use_desc})"));
}
}
if self.top().1 != self.bottom().1 {
@@ -58,7 +58,7 @@ impl<'tcx> TraitAliasExpansionInfo<'tcx> {
// redundant labels.
diag.span_label(
self.bottom().1,
- format!("trait alias used in trait object type ({})", use_desc),
+ format!("trait alias used in trait object type ({use_desc})"),
);
}
}
@@ -101,7 +101,7 @@ impl<'tcx> TraitAliasExpander<'tcx> {
fn expand(&mut self, item: &TraitAliasExpansionInfo<'tcx>) -> bool {
let tcx = self.tcx;
let trait_ref = item.trait_ref();
- let pred = trait_ref.without_const().to_predicate(tcx);
+ let pred = trait_ref.to_predicate(tcx);
debug!("expand_trait_aliases: trait_ref={:?}", trait_ref);
@@ -113,9 +113,13 @@ impl<'tcx> TraitAliasExpander<'tcx> {
// Don't recurse if this trait alias is already on the stack for the DFS search.
let anon_pred = anonymize_predicate(tcx, pred);
- if item.path.iter().rev().skip(1).any(|&(tr, _)| {
- anonymize_predicate(tcx, tr.without_const().to_predicate(tcx)) == anon_pred
- }) {
+ if item
+ .path
+ .iter()
+ .rev()
+ .skip(1)
+ .any(|&(tr, _)| anonymize_predicate(tcx, tr.to_predicate(tcx)) == anon_pred)
+ {
return false;
}
@@ -194,24 +198,24 @@ impl Iterator for SupertraitDefIds<'_> {
// Other
///////////////////////////////////////////////////////////////////////////
-/// Instantiate all bound parameters of the impl subject with the given substs,
+/// Instantiate all bound parameters of the impl subject with the given args,
/// returning the resulting subject and all obligations that arise.
/// The obligations are closed under normalization.
pub fn impl_subject_and_oblig<'a, 'tcx>(
selcx: &mut SelectionContext<'a, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
impl_def_id: DefId,
- impl_substs: SubstsRef<'tcx>,
+ impl_args: GenericArgsRef<'tcx>,
cause: impl Fn(usize, Span) -> ObligationCause<'tcx>,
) -> (ImplSubject<'tcx>, impl Iterator<Item = PredicateObligation<'tcx>>) {
let subject = selcx.tcx().impl_subject(impl_def_id);
- let subject = subject.subst(selcx.tcx(), impl_substs);
+ let subject = subject.instantiate(selcx.tcx(), impl_args);
let InferOk { value: subject, obligations: normalization_obligations1 } =
selcx.infcx.at(&ObligationCause::dummy(), param_env).normalize(subject);
let predicates = selcx.tcx().predicates_of(impl_def_id);
- let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
+ let predicates = predicates.instantiate(selcx.tcx(), impl_args);
let InferOk { value: predicates, obligations: normalization_obligations2 } =
selcx.infcx.at(&ObligationCause::dummy(), param_env).normalize(predicates);
let impl_obligations = super::predicates_for_generics(cause, param_env, predicates);
@@ -241,9 +245,9 @@ pub fn upcast_choices<'tcx>(
/// Given an upcast trait object described by `object`, returns the
/// index of the method `method_def_id` (which should be part of
/// `object.upcast_trait_ref`) within the vtable for `object`.
-pub fn get_vtable_index_of_object_method<'tcx, N>(
+pub fn get_vtable_index_of_object_method<'tcx>(
tcx: TyCtxt<'tcx>,
- object: &super::ImplSourceObjectData<N>,
+ vtable_base: usize,
method_def_id: DefId,
) -> Option<usize> {
// Count number of methods preceding the one we are selecting and
@@ -252,7 +256,7 @@ pub fn get_vtable_index_of_object_method<'tcx, N>(
.iter()
.copied()
.position(|def_id| def_id == method_def_id)
- .map(|index| object.vtable_base + index)
+ .map(|index| vtable_base + index)
}
pub fn closure_trait_ref_and_return_type<'tcx>(
@@ -303,13 +307,13 @@ pub enum TupleArgumentsFlag {
No,
}
-// Verify that the trait item and its implementation have compatible substs lists
-pub fn check_substs_compatible<'tcx>(
+// Verify that the trait item and its implementation have compatible args lists
+pub fn check_args_compatible<'tcx>(
tcx: TyCtxt<'tcx>,
assoc_item: ty::AssocItem,
- substs: ty::SubstsRef<'tcx>,
+ args: ty::GenericArgsRef<'tcx>,
) -> bool {
- fn check_substs_compatible_inner<'tcx>(
+ fn check_args_compatible_inner<'tcx>(
tcx: TyCtxt<'tcx>,
generics: &'tcx ty::Generics,
args: &'tcx [ty::GenericArg<'tcx>],
@@ -322,7 +326,7 @@ pub fn check_substs_compatible<'tcx>(
if let Some(parent) = generics.parent
&& let parent_generics = tcx.generics_of(parent)
- && !check_substs_compatible_inner(tcx, parent_generics, parent_args) {
+ && !check_args_compatible_inner(tcx, parent_generics, parent_args) {
return false;
}
@@ -339,7 +343,7 @@ pub fn check_substs_compatible<'tcx>(
}
let generics = tcx.generics_of(assoc_item.def_id);
- // Chop off any additional substs (RPITIT) substs
- let substs = &substs[0..generics.count().min(substs.len())];
- check_substs_compatible_inner(tcx, generics, substs)
+ // Chop off any additional args (RPITIT) args
+ let args = &args[0..generics.count().min(args.len())];
+ check_args_compatible_inner(tcx, generics, args)
}
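After this change `get_vtable_index_of_object_method` takes the `vtable_base` of the trait's own segment directly instead of reading it off an `ImplSourceObjectData`; the slot is then just the base plus the method's position among the trait's own existential entries. A standalone sketch of that arithmetic (names such as `MethodId` and `own_entries` are illustrative, not compiler types):

type MethodId = u32;

fn vtable_index_of(own_entries: &[MethodId], vtable_base: usize, method: MethodId) -> Option<usize> {
    own_entries
        .iter()
        .copied()
        .position(|id| id == method)
        .map(|index| vtable_base + index)
}

fn main() {
    // Three metadata slots (drop-in-place, size, align) typically precede the
    // first trait segment, hence a base of 3 in this toy example.
    let own_entries = [10, 11, 12];
    assert_eq!(vtable_index_of(&own_entries, 3, 11), Some(4));
    assert_eq!(vtable_index_of(&own_entries, 3, 99), None);
}
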
diff --git a/compiler/rustc_trait_selection/src/traits/vtable.rs b/compiler/rustc_trait_selection/src/traits/vtable.rs
index 1f83f1f44..427ac3684 100644
--- a/compiler/rustc_trait_selection/src/traits/vtable.rs
+++ b/compiler/rustc_trait_selection/src/traits/vtable.rs
@@ -5,8 +5,9 @@ use rustc_hir::lang_items::LangItem;
use rustc_infer::traits::util::PredicateSet;
use rustc_infer::traits::ImplSource;
use rustc_middle::query::Providers;
+use rustc_middle::traits::BuiltinImplSource;
use rustc_middle::ty::visit::TypeVisitableExt;
-use rustc_middle::ty::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{self, GenericParamDefKind, ToPredicate, Ty, TyCtxt, VtblEntry};
use rustc_span::{sym, Span};
use smallvec::SmallVec;
@@ -24,8 +25,18 @@ pub enum VtblSegment<'tcx> {
pub fn prepare_vtable_segments<'tcx, T>(
tcx: TyCtxt<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
- mut segment_visitor: impl FnMut(VtblSegment<'tcx>) -> ControlFlow<T>,
+ segment_visitor: impl FnMut(VtblSegment<'tcx>) -> ControlFlow<T>,
) -> Option<T> {
+ prepare_vtable_segments_inner(tcx, trait_ref, segment_visitor).break_value()
+}
+
+/// Helper for [`prepare_vtable_segments`] that returns `ControlFlow`,
+/// such that we can use `?` in the body.
+fn prepare_vtable_segments_inner<'tcx, T>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ mut segment_visitor: impl FnMut(VtblSegment<'tcx>) -> ControlFlow<T>,
+) -> ControlFlow<T> {
// The following constraints holds for the final arrangement.
// 1. The whole virtual table of the first direct super trait is included as the
// the prefix. If this trait doesn't have any super traits, then this step
@@ -71,20 +82,18 @@ pub fn prepare_vtable_segments<'tcx, T>(
// N, N-vptr, O
// emit dsa segment first.
- if let ControlFlow::Break(v) = (segment_visitor)(VtblSegment::MetadataDSA) {
- return Some(v);
- }
+ segment_visitor(VtblSegment::MetadataDSA)?;
let mut emit_vptr_on_new_entry = false;
let mut visited = PredicateSet::new(tcx);
- let predicate = trait_ref.without_const().to_predicate(tcx);
+ let predicate = trait_ref.to_predicate(tcx);
let mut stack: SmallVec<[(ty::PolyTraitRef<'tcx>, _, _); 5]> =
- smallvec![(trait_ref, emit_vptr_on_new_entry, None)];
+ smallvec![(trait_ref, emit_vptr_on_new_entry, maybe_iter(None))];
visited.insert(predicate);
// the main traversal loop:
// basically we want to cut the inheritance directed graph into a few non-overlapping slices of nodes
- // that each node is emitted after all its descendents have been emitted.
+ // such that each node is emitted after all its descendants have been emitted.
// so we convert the directed graph into a tree by skipping all previously visited nodes using a visited set.
// this is done on the fly.
// Each loop run emits a slice - it starts by find a "childless" unvisited node, backtracking upwards, and it
@@ -105,98 +114,107 @@ pub fn prepare_vtable_segments<'tcx, T>(
// Loop run #1: Emitting the slice [D C] (in reverse order). No one has a next-sibling node.
// Loop run #1: Stack after exiting out is []. Now the function exits.
- loop {
+ 'outer: loop {
// dive deeper into the stack, recording the path
'diving_in: loop {
- if let Some((inner_most_trait_ref, _, _)) = stack.last() {
- let inner_most_trait_ref = *inner_most_trait_ref;
- let mut direct_super_traits_iter = tcx
- .super_predicates_of(inner_most_trait_ref.def_id())
- .predicates
- .into_iter()
- .filter_map(move |(pred, _)| {
- pred.subst_supertrait(tcx, &inner_most_trait_ref).as_trait_clause()
- });
+ let &(inner_most_trait_ref, _, _) = stack.last().unwrap();
+
+ let mut direct_super_traits_iter = tcx
+ .super_predicates_of(inner_most_trait_ref.def_id())
+ .predicates
+ .into_iter()
+ .filter_map(move |(pred, _)| {
+ pred.subst_supertrait(tcx, &inner_most_trait_ref).as_trait_clause()
+ });
- 'diving_in_skip_visited_traits: loop {
- if let Some(next_super_trait) = direct_super_traits_iter.next() {
- if visited.insert(next_super_trait.to_predicate(tcx)) {
- // We're throwing away potential constness of super traits here.
- // FIXME: handle ~const super traits
- let next_super_trait = next_super_trait.map_bound(|t| t.trait_ref);
- stack.push((
- next_super_trait,
- emit_vptr_on_new_entry,
- Some(direct_super_traits_iter),
- ));
- break 'diving_in_skip_visited_traits;
- } else {
- continue 'diving_in_skip_visited_traits;
- }
- } else {
- break 'diving_in;
- }
+ // Find an unvisited supertrait
+ match direct_super_traits_iter
+ .find(|&super_trait| visited.insert(super_trait.to_predicate(tcx)))
+ {
+ // Push it to the stack for the next iteration of 'diving_in to pick up
+ Some(unvisited_super_trait) => {
+ // We're throwing away potential constness of super traits here.
+ // FIXME: handle ~const super traits
+ let next_super_trait = unvisited_super_trait.map_bound(|t| t.trait_ref);
+ stack.push((
+ next_super_trait,
+ emit_vptr_on_new_entry,
+ maybe_iter(Some(direct_super_traits_iter)),
+ ))
}
+
+ // There are no more unvisited direct super traits, dive-in finished
+ None => break 'diving_in,
}
}
- // Other than the left-most path, vptr should be emitted for each trait.
- emit_vptr_on_new_entry = true;
-
// emit innermost item, move to next sibling and stop there if possible, otherwise jump to outer level.
- 'exiting_out: loop {
- if let Some((inner_most_trait_ref, emit_vptr, siblings_opt)) = stack.last_mut() {
- if let ControlFlow::Break(v) = (segment_visitor)(VtblSegment::TraitOwnEntries {
- trait_ref: *inner_most_trait_ref,
- emit_vptr: *emit_vptr,
- }) {
- return Some(v);
- }
+ while let Some((inner_most_trait_ref, emit_vptr, mut siblings)) = stack.pop() {
+ segment_visitor(VtblSegment::TraitOwnEntries {
+ trait_ref: inner_most_trait_ref,
+ emit_vptr,
+ })?;
+
+ // If we've emitted (fed to `segment_visitor`) a trait that has methods present in the vtable,
+ // we'll need to emit vptrs from now on.
+ if !emit_vptr_on_new_entry
+ && has_own_existential_vtable_entries(tcx, inner_most_trait_ref.def_id())
+ {
+ emit_vptr_on_new_entry = true;
+ }
- 'exiting_out_skip_visited_traits: loop {
- if let Some(siblings) = siblings_opt {
- if let Some(next_inner_most_trait_ref) = siblings.next() {
- if visited.insert(next_inner_most_trait_ref.to_predicate(tcx)) {
- // We're throwing away potential constness of super traits here.
- // FIXME: handle ~const super traits
- let next_inner_most_trait_ref =
- next_inner_most_trait_ref.map_bound(|t| t.trait_ref);
- *inner_most_trait_ref = next_inner_most_trait_ref;
- *emit_vptr = emit_vptr_on_new_entry;
- break 'exiting_out;
- } else {
- continue 'exiting_out_skip_visited_traits;
- }
- }
- }
- stack.pop();
- continue 'exiting_out;
- }
+ if let Some(next_inner_most_trait_ref) =
+ siblings.find(|&sibling| visited.insert(sibling.to_predicate(tcx)))
+ {
+ // We're throwing away potential constness of super traits here.
+ // FIXME: handle ~const super traits
+ let next_inner_most_trait_ref =
+ next_inner_most_trait_ref.map_bound(|t| t.trait_ref);
+
+ stack.push((next_inner_most_trait_ref, emit_vptr_on_new_entry, siblings));
+
+ // just pushed a new trait onto the stack, so we need to go through its super traits
+ continue 'outer;
}
- // all done
- return None;
}
+
+ // the stack is empty, all done
+ return ControlFlow::Continue(());
}
}
+/// Turns option of iterator into an iterator (this is just flatten)
+fn maybe_iter<I: Iterator>(i: Option<I>) -> impl Iterator<Item = I::Item> {
+ // Flatten is bad perf-vise, we could probably implement a special case here that is better
+ i.into_iter().flatten()
+}
+
fn dump_vtable_entries<'tcx>(
tcx: TyCtxt<'tcx>,
sp: Span,
trait_ref: ty::PolyTraitRef<'tcx>,
entries: &[VtblEntry<'tcx>],
) {
- tcx.sess.emit_err(DumpVTableEntries {
- span: sp,
- trait_ref,
- entries: format!("{:#?}", entries),
- });
+ tcx.sess.emit_err(DumpVTableEntries { span: sp, trait_ref, entries: format!("{entries:#?}") });
+}
+
+fn has_own_existential_vtable_entries(tcx: TyCtxt<'_>, trait_def_id: DefId) -> bool {
+ own_existential_vtable_entries_iter(tcx, trait_def_id).next().is_some()
}
fn own_existential_vtable_entries(tcx: TyCtxt<'_>, trait_def_id: DefId) -> &[DefId] {
+ tcx.arena.alloc_from_iter(own_existential_vtable_entries_iter(tcx, trait_def_id))
+}
+
+fn own_existential_vtable_entries_iter(
+ tcx: TyCtxt<'_>,
+ trait_def_id: DefId,
+) -> impl Iterator<Item = DefId> + '_ {
let trait_methods = tcx
.associated_items(trait_def_id)
.in_definition_order()
.filter(|item| item.kind == ty::AssocKind::Fn);
+
// Now list each method's DefId (for within its trait).
let own_entries = trait_methods.filter_map(move |&trait_method| {
debug!("own_existential_vtable_entry: trait_method={:?}", trait_method);
@@ -211,7 +229,7 @@ fn own_existential_vtable_entries(tcx: TyCtxt<'_>, trait_def_id: DefId) -> &[Def
Some(def_id)
});
- tcx.arena.alloc_from_iter(own_entries.into_iter())
+ own_entries
}
/// Given a trait `trait_ref`, iterates the vtable entries
@@ -241,12 +259,12 @@ fn vtable_entries<'tcx>(
debug!("vtable_entries: trait_method={:?}", def_id);
// The method may have some early-bound lifetimes; add regions for those.
- let substs = trait_ref.map_bound(|trait_ref| {
- InternalSubsts::for_item(tcx, def_id, |param, _| match param.kind {
+ let args = trait_ref.map_bound(|trait_ref| {
+ GenericArgs::for_item(tcx, def_id, |param, _| match param.kind {
GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
GenericParamDefKind::Type { .. }
| GenericParamDefKind::Const { .. } => {
- trait_ref.substs[param.index as usize]
+ trait_ref.args[param.index as usize]
}
})
});
@@ -254,14 +272,14 @@ fn vtable_entries<'tcx>(
// The trait type may have higher-ranked lifetimes in it;
// erase them if they appear, so that we get the type
// at some particular call site.
- let substs = tcx
- .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), substs);
+ let args =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), args);
// It's possible that the method relies on where-clauses that
// do not hold for this particular set of type parameters.
// Note that this method could then never be called, so we
// do not want to try and codegen it, in that case (see #23435).
- let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs);
+ let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, args);
if impossible_predicates(
tcx,
predicates.map(|(predicate, _)| predicate).collect(),
@@ -274,7 +292,7 @@ fn vtable_entries<'tcx>(
tcx,
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.expect("resolution failed during building vtable representation");
VtblEntry::Method(instance)
@@ -363,8 +381,8 @@ pub(crate) fn vtable_trait_upcasting_coercion_new_vptr_slot<'tcx>(
let trait_ref = ty::TraitRef::new(tcx, unsize_trait_did, [source, target]);
match tcx.codegen_select_candidate((ty::ParamEnv::reveal_all(), trait_ref)) {
- Ok(ImplSource::TraitUpcasting(implsrc_traitcasting)) => {
- implsrc_traitcasting.vtable_vptr_slot
+ Ok(ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { vtable_vptr_slot }, _)) => {
+ *vtable_vptr_slot
}
otherwise => bug!("expected TraitUpcasting candidate, got {otherwise:?}"),
}
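The traversal rewritten above walks the supertrait graph depth-first, emitting the first direct supertrait chain as the prefix of the vtable and then the remaining supertraits, with vptr slots added once a trait that has its own methods has been emitted. A small hierarchy that exercises that layout (illustrative only; the resulting slot order is decided by the compiler and is not observable from this code):

trait A { fn a(&self) {} }
trait B: A { fn b(&self) {} }
trait C: A { fn c(&self) {} }
trait D: B + C { fn d(&self) {} }

struct S;
impl A for S {}
impl B for S {}
impl C for S {}
impl D for S {}

fn main() {
    // For `dyn D`, the segments come out roughly as: metadata DSA, then A's
    // entries, then B's, then C's, then D's own methods.
    let obj: &dyn D = &S;
    obj.a();
    obj.b();
    obj.c();
    obj.d();
}
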
diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs
index d81722ce2..f26310665 100644
--- a/compiler/rustc_trait_selection/src/traits/wf.rs
+++ b/compiler/rustc_trait_selection/src/traits/wf.rs
@@ -2,8 +2,8 @@ use crate::infer::InferCtxt;
use crate::traits;
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{GenericArg, GenericArgKind, GenericArgsRef};
use rustc_span::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
use rustc_span::{Span, DUMMY_SP};
@@ -341,18 +341,14 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
let trait_ref = &trait_pred.trait_ref;
// Negative trait predicates don't require supertraits to hold, just
- // that their substs are WF.
+ // that their args are WF.
if trait_pred.polarity == ty::ImplPolarity::Negative {
self.compute_negative_trait_pred(trait_ref);
return;
}
// if the trait predicate is not const, the wf obligations should not be const as well.
- let obligations = if trait_pred.constness == ty::BoundConstness::NotConst {
- self.nominal_obligations_without_const(trait_ref.def_id, trait_ref.substs)
- } else {
- self.nominal_obligations(trait_ref.def_id, trait_ref.substs)
- };
+ let obligations = self.nominal_obligations(trait_ref.def_id, trait_ref.args);
debug!("compute_trait_pred obligations {:?}", obligations);
let param_env = self.param_env;
@@ -383,7 +379,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
self.out.extend(
trait_ref
- .substs
+ .args
.iter()
.enumerate()
.filter(|(_, arg)| {
@@ -416,7 +412,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// Compute the obligations that are required for `trait_ref` to be WF,
// given that it is a *negative* trait predicate.
fn compute_negative_trait_pred(&mut self, trait_ref: &ty::TraitRef<'tcx>) {
- for arg in trait_ref.substs {
+ for arg in trait_ref.args {
self.compute(arg);
}
}
@@ -427,7 +423,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// A projection is well-formed if
//
// (a) its predicates hold (*)
- // (b) its substs are wf
+ // (b) its args are wf
//
// (*) The predicates of an associated type include the predicates of
// the trait that it's contained in. For example, given
@@ -445,18 +441,17 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// `i32: Clone`
// `i32: Copy`
// ]
- // Projection types do not require const predicates.
- let obligations = self.nominal_obligations_without_const(data.def_id, data.substs);
+ let obligations = self.nominal_obligations(data.def_id, data.args);
self.out.extend(obligations);
- self.compute_projection_substs(data.substs);
+ self.compute_projection_args(data.args);
}
fn compute_inherent_projection(&mut self, data: ty::AliasTy<'tcx>) {
// An inherent projection is well-formed if
//
// (a) its predicates hold (*)
- // (b) its substs are wf
+ // (b) its args are wf
//
// (*) The predicates of an inherent associated type include the
// predicates of the impl that it's contained in.
@@ -464,7 +459,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
if !data.self_ty().has_escaping_bound_vars() {
// FIXME(inherent_associated_types): Should this happen inside of a snapshot?
// FIXME(inherent_associated_types): This is incompatible with the new solver and lazy norm!
- let substs = traits::project::compute_inherent_assoc_ty_substs(
+ let args = traits::project::compute_inherent_assoc_ty_args(
&mut traits::SelectionContext::new(self.infcx),
self.param_env,
data,
@@ -472,23 +467,21 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
self.recursion_depth,
&mut self.out,
);
- // Inherent projection types do not require const predicates.
- let obligations = self.nominal_obligations_without_const(data.def_id, substs);
+ let obligations = self.nominal_obligations(data.def_id, args);
self.out.extend(obligations);
}
- self.compute_projection_substs(data.substs);
+ self.compute_projection_args(data.args);
}
- fn compute_projection_substs(&mut self, substs: SubstsRef<'tcx>) {
+ fn compute_projection_args(&mut self, args: GenericArgsRef<'tcx>) {
let tcx = self.tcx();
let cause = self.cause(traits::WellFormed(None));
let param_env = self.param_env;
let depth = self.recursion_depth;
self.out.extend(
- substs
- .iter()
+ args.iter()
.filter(|arg| {
matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..))
})
@@ -517,7 +510,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
cause,
self.recursion_depth,
self.param_env,
- ty::Binder::dummy(trait_ref).without_const(),
+ ty::Binder::dummy(trait_ref),
));
}
}
@@ -541,7 +534,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
match ct.kind() {
ty::ConstKind::Unevaluated(uv) => {
if !ct.has_escaping_bound_vars() {
- let obligations = self.nominal_obligations(uv.def, uv.substs);
+ let obligations = self.nominal_obligations(uv.def, uv.args);
self.out.extend(obligations);
let predicate = ty::Binder::dummy(ty::PredicateKind::Clause(
@@ -571,7 +564,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
));
}
ty::ConstKind::Expr(_) => {
- // FIXME(generic_const_exprs): this doesnt verify that given `Expr(N + 1)` the
+ // FIXME(generic_const_exprs): this doesn't verify that given `Expr(N + 1)` the
// trait bound `typeof(N): Add<typeof(1)>` holds. This is currently unnecessary
// as `ConstKind::Expr` is only produced via normalization of `ConstKind::Unevaluated`
// which means that the `DefId` would have been typeck'd elsewhere. However in
@@ -661,14 +654,14 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
self.compute_inherent_projection(data);
}
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
// WfNominalType
- let obligations = self.nominal_obligations(def.did(), substs);
+ let obligations = self.nominal_obligations(def.did(), args);
self.out.extend(obligations);
}
- ty::FnDef(did, substs) => {
- let obligations = self.nominal_obligations_without_const(did, substs);
+ ty::FnDef(did, args) => {
+ let obligations = self.nominal_obligations(did, args);
self.out.extend(obligations);
}
@@ -688,7 +681,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
}
}
- ty::Generator(did, substs, ..) => {
+ ty::Generator(did, args, ..) => {
// Walk ALL the types in the generator: this will
// include the upvar types as well as the yield
// type. Note that this is mildly distinct from
@@ -696,11 +689,11 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// about the signature of the closure. We don't
// have the problem of implied bounds here since
// generators don't take arguments.
- let obligations = self.nominal_obligations(did, substs);
+ let obligations = self.nominal_obligations(did, args);
self.out.extend(obligations);
}
- ty::Closure(did, substs) => {
+ ty::Closure(did, args) => {
// Only check the upvar types for WF, not the rest
// of the types within. This is needed because we
// capture the signature and it may not be WF
@@ -723,7 +716,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// fn(&'a T) }`, as discussed in #25860.
walker.skip_current_subtree(); // subtree handled below
// FIXME(eddyb) add the type to `walker` instead of recursing.
- self.compute(substs.as_closure().tupled_upvars_ty().into());
+ self.compute(args.as_closure().tupled_upvars_ty().into());
// Note that we cannot skip the generic
// types. Normally, within the fn
// body where they are created, the generics will
@@ -739,7 +732,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// can cause compiler crashes when the user abuses unsafe
// code to procure such a closure.
// See tests/ui/type-alias-impl-trait/wf_check_closures.rs
- let obligations = self.nominal_obligations(did, substs);
+ let obligations = self.nominal_obligations(did, args);
self.out.extend(obligations);
}
@@ -748,18 +741,18 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// types appearing in the fn signature
}
- ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
+ ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
// All of the requirements on type parameters
// have already been checked for `impl Trait` in
// return position. We do need to check type-alias-impl-trait though.
if self.tcx().is_type_alias_impl_trait(def_id) {
- let obligations = self.nominal_obligations(def_id, substs);
+ let obligations = self.nominal_obligations(def_id, args);
self.out.extend(obligations);
}
}
- ty::Alias(ty::Weak, ty::AliasTy { def_id, substs, .. }) => {
- let obligations = self.nominal_obligations(def_id, substs);
+ ty::Alias(ty::Weak, ty::AliasTy { def_id, args, .. }) => {
+ let obligations = self.nominal_obligations(def_id, args);
self.out.extend(obligations);
}
@@ -823,11 +816,10 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
}
#[instrument(level = "debug", skip(self))]
- fn nominal_obligations_inner(
+ fn nominal_obligations(
&mut self,
def_id: DefId,
- substs: SubstsRef<'tcx>,
- remap_constness: bool,
+ args: GenericArgsRef<'tcx>,
) -> Vec<traits::PredicateObligation<'tcx>> {
let predicates = self.tcx().predicates_of(def_id);
let mut origins = vec![def_id; predicates.predicates.len()];
@@ -837,21 +829,18 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
origins.extend(iter::repeat(parent).take(head.predicates.len()));
}
- let predicates = predicates.instantiate(self.tcx(), substs);
+ let predicates = predicates.instantiate(self.tcx(), args);
trace!("{:#?}", predicates);
debug_assert_eq!(predicates.predicates.len(), origins.len());
iter::zip(predicates, origins.into_iter().rev())
- .map(|((mut pred, span), origin_def_id)| {
+ .map(|((pred, span), origin_def_id)| {
let code = if span.is_dummy() {
traits::ItemObligation(origin_def_id)
} else {
traits::BindingObligation(origin_def_id, span)
};
let cause = self.cause(code);
- if remap_constness {
- pred = pred.without_const(self.tcx());
- }
traits::Obligation::with_depth(
self.tcx(),
cause,
@@ -864,22 +853,6 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
.collect()
}
- fn nominal_obligations(
- &mut self,
- def_id: DefId,
- substs: SubstsRef<'tcx>,
- ) -> Vec<traits::PredicateObligation<'tcx>> {
- self.nominal_obligations_inner(def_id, substs, false)
- }
-
- fn nominal_obligations_without_const(
- &mut self,
- def_id: DefId,
- substs: SubstsRef<'tcx>,
- ) -> Vec<traits::PredicateObligation<'tcx>> {
- self.nominal_obligations_inner(def_id, substs, true)
- }
-
fn from_object_ty(
&mut self,
ty: Ty<'tcx>,
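Illustrative sketch (not part of the patch, and not rustc's real types or API): the `Ty`, `Obligation`, and `compute_wf` names below are invented to show the shape of what `compute`/`nominal_obligations` do above, namely walking a type's generic arguments and collecting a well-formedness obligation for each nominal type encountered.

    // Toy model: a tiny type tree and a WF walker that collects obligations.
    #[derive(Debug)]
    enum Ty {
        Int,
        Ref(Box<Ty>),
        Adt { name: &'static str, args: Vec<Ty> },
    }

    #[derive(Debug)]
    struct Obligation(String);

    fn compute_wf(ty: &Ty, out: &mut Vec<Obligation>) {
        match ty {
            Ty::Int => {}
            Ty::Ref(inner) => compute_wf(inner, out),
            Ty::Adt { name, args } => {
                // Analogous to `nominal_obligations`: the ADT's own predicates,
                // instantiated with `args`, must hold.
                out.push(Obligation(format!("{name} is well-formed for {} arg(s)", args.len())));
                // And, analogous to `compute`, each argument must itself be WF.
                for arg in args {
                    compute_wf(arg, out);
                }
            }
        }
    }

    fn main() {
        let ty = Ty::Adt {
            name: "Vec",
            args: vec![Ty::Int, Ty::Ref(Box::new(Ty::Adt { name: "String", args: vec![] }))],
        };
        let mut out = Vec::new();
        compute_wf(&ty, &mut out);
        println!("{out:?}");
    }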
diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs
index f35c14eea..074764f0c 100644
--- a/compiler/rustc_traits/src/dropck_outlives.rs
+++ b/compiler/rustc_traits/src/dropck_outlives.rs
@@ -4,7 +4,7 @@ use rustc_infer::infer::canonical::{Canonical, QueryResponse};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::query::Providers;
use rustc_middle::traits::query::{DropckConstraint, DropckOutlivesResult};
-use rustc_middle::ty::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::TyCtxt;
use rustc_trait_selection::infer::InferCtxtBuilderExt;
use rustc_trait_selection::traits::query::dropck_outlives::{
@@ -41,11 +41,11 @@ pub(crate) fn adt_dtorck_constraint(
} else if def.is_phantom_data() {
// The first generic parameter here is guaranteed to be a type because it's
// `PhantomData`.
- let substs = InternalSubsts::identity_for_item(tcx, def_id);
- assert_eq!(substs.len(), 1);
+ let args = GenericArgs::identity_for_item(tcx, def_id);
+ assert_eq!(args.len(), 1);
let result = DropckConstraint {
outlives: vec![],
- dtorck_types: vec![substs.type_at(0)],
+ dtorck_types: vec![args.type_at(0)],
overflows: vec![],
};
debug!("dtorck_constraint: {:?} => {:?}", def, result);
@@ -54,7 +54,7 @@ pub(crate) fn adt_dtorck_constraint(
let mut result = DropckConstraint::empty();
for field in def.all_fields() {
- let fty = tcx.type_of(field.did).subst_identity();
+ let fty = tcx.type_of(field.did).instantiate_identity();
dtorck_constraint_for_ty_inner(tcx, span, fty, 0, fty, &mut result)?;
}
result.outlives.extend(tcx.destructor_constraints(def));
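Illustrative sketch (not part of the patch): a small, self-contained program showing why `PhantomData` gets the special case above. It owns no value of type `T`, so no destructor for `T` runs when it is dropped; the only thing drop-check can still constrain is `T` itself, which is what `dtorck_types: vec![args.type_at(0)]` expresses.

    use std::marker::PhantomData;
    use std::sync::atomic::{AtomicUsize, Ordering};

    static DROPS: AtomicUsize = AtomicUsize::new(0);

    struct Noisy;
    impl Drop for Noisy {
        fn drop(&mut self) {
            DROPS.fetch_add(1, Ordering::Relaxed);
        }
    }

    #[allow(dead_code)]
    struct Owns<T>(T);               // really stores (and drops) a T
    #[allow(dead_code)]
    struct Marks<T>(PhantomData<T>); // only "marks" T for variance/dropck purposes

    fn main() {
        drop(Owns(Noisy));
        drop(Marks::<Noisy>(PhantomData));
        // Only the `Owns` value actually ran Noisy's destructor; the
        // PhantomData wrapper dropped nothing.
        assert_eq!(DROPS.load(Ordering::Relaxed), 1);
    }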
diff --git a/compiler/rustc_traits/src/normalize_projection_ty.rs b/compiler/rustc_traits/src/normalize_projection_ty.rs
index 83828f177..0dbac56b4 100644
--- a/compiler/rustc_traits/src/normalize_projection_ty.rs
+++ b/compiler/rustc_traits/src/normalize_projection_ty.rs
@@ -58,7 +58,7 @@ fn normalize_weak_ty<'tcx>(
tcx.infer_ctxt().enter_canonical_trait_query(
&goal,
|ocx, ParamEnvAnd { param_env, value: goal }| {
- let obligations = tcx.predicates_of(goal.def_id).instantiate_own(tcx, goal.substs).map(
+ let obligations = tcx.predicates_of(goal.def_id).instantiate_own(tcx, goal.args).map(
|(predicate, span)| {
traits::Obligation::new(
tcx,
@@ -69,7 +69,7 @@ fn normalize_weak_ty<'tcx>(
},
);
ocx.register_obligations(obligations);
- let normalized_ty = tcx.type_of(goal.def_id).subst(tcx, goal.substs);
+ let normalized_ty = tcx.type_of(goal.def_id).instantiate(tcx, goal.args);
Ok(NormalizationResult { normalized_ty })
},
)
diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs
index be434eb7d..e8ddb0a43 100644
--- a/compiler/rustc_transmute/src/layout/tree.rs
+++ b/compiler/rustc_transmute/src/layout/tree.rs
@@ -175,8 +175,8 @@ pub(crate) mod rustc {
use rustc_middle::ty::layout::LayoutError;
use rustc_middle::ty::util::Discr;
use rustc_middle::ty::AdtDef;
+ use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::ParamEnv;
- use rustc_middle::ty::SubstsRef;
use rustc_middle::ty::VariantDef;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::ErrorGuaranteed;
@@ -195,7 +195,7 @@ pub(crate) mod rustc {
impl<'tcx> From<&LayoutError<'tcx>> for Err {
fn from(err: &LayoutError<'tcx>) -> Self {
match err {
- LayoutError::Unknown(..) => Self::UnknownLayout,
+ LayoutError::Unknown(..) | LayoutError::ReferencesError(..) => Self::UnknownLayout,
err => unimplemented!("{:?}", err),
}
}
@@ -297,7 +297,7 @@ pub(crate) mod rustc {
.fold(Tree::unit(), |tree, elt| tree.then(elt)))
}
- ty::Adt(adt_def, substs_ref) => {
+ ty::Adt(adt_def, args_ref) => {
use rustc_middle::ty::AdtKind;
// If the layout is ill-specified, halt.
@@ -316,7 +316,7 @@ pub(crate) mod rustc {
AdtKind::Struct => Self::from_repr_c_variant(
ty,
*adt_def,
- substs_ref,
+ args_ref,
&layout_summary,
None,
adt_def.non_enum_variant(),
@@ -330,7 +330,7 @@ pub(crate) mod rustc {
tree = tree.or(Self::from_repr_c_variant(
ty,
*adt_def,
- substs_ref,
+ args_ref,
&layout_summary,
Some(discr),
adt_def.variant(idx),
@@ -351,7 +351,7 @@ pub(crate) mod rustc {
let mut tree = Tree::uninhabited();
for field in adt_def.all_fields() {
- let variant_ty = field.ty(tcx, substs_ref);
+ let variant_ty = field.ty(tcx, args_ref);
let variant_layout = layout_of(tcx, variant_ty)?;
let padding_needed = ty_layout.size() - variant_layout.size();
let variant = Self::def(Def::Field(field))
@@ -383,7 +383,7 @@ pub(crate) mod rustc {
fn from_repr_c_variant(
ty: Ty<'tcx>,
adt_def: AdtDef<'tcx>,
- substs_ref: SubstsRef<'tcx>,
+ args_ref: GenericArgsRef<'tcx>,
layout_summary: &LayoutSummary,
discr: Option<Discr<'tcx>>,
variant_def: &'tcx VariantDef,
@@ -427,7 +427,7 @@ pub(crate) mod rustc {
// Next come fields.
let fields_span = trace_span!("treeifying fields").entered();
for field_def in variant_def.fields.iter() {
- let field_ty = field_def.ty(tcx, substs_ref);
+ let field_ty = field_def.ty(tcx, args_ref);
let _span = trace_span!("treeifying field", field = ?field_ty).entered();
// begin with the field's visibility
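Illustrative sketch (not part of the patch, unrelated to rustc_transmute's internal `Tree` type): the kind of `#[repr(C)]` layout facts the treeification above walks field by field, shown with the standard library's size/align/offset queries. Assumes a recent stable toolchain for `offset_of!` (1.77+).

    use std::mem::{align_of, offset_of, size_of};

    #[allow(dead_code)]
    #[repr(C)]
    struct ReprC {
        a: u8,   // offset 0
        b: u32,  // padded up to offset 4 on common targets
        c: u16,  // offset 8 on common targets
    }

    fn main() {
        // #[repr(C)] lays fields out in declaration order, inserting padding
        // so each field is aligned; the per-field treeification above models
        // exactly this sequence of fields and padding bytes.
        println!("size  = {}", size_of::<ReprC>());
        println!("align = {}", align_of::<ReprC>());
        println!("offsets: a={} b={} c={}",
                 offset_of!(ReprC, a), offset_of!(ReprC, b), offset_of!(ReprC, c));
    }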
diff --git a/compiler/rustc_transmute/src/lib.rs b/compiler/rustc_transmute/src/lib.rs
index 34ad6bd8c..05ad4a4a1 100644
--- a/compiler/rustc_transmute/src/lib.rs
+++ b/compiler/rustc_transmute/src/lib.rs
@@ -78,6 +78,7 @@ mod rustc {
use rustc_middle::ty::ParamEnv;
use rustc_middle::ty::Ty;
use rustc_middle::ty::TyCtxt;
+ use rustc_middle::ty::ValTree;
/// The source and destination types of a transmutation.
#[derive(TypeVisitable, Debug, Clone, Copy)]
@@ -148,7 +149,17 @@ mod rustc {
);
let variant = adt_def.non_enum_variant();
- let fields = c.to_valtree().unwrap_branch();
+ let fields = match c.try_to_valtree() {
+ Some(ValTree::Branch(branch)) => branch,
+ _ => {
+ return Some(Self {
+ alignment: true,
+ lifetimes: true,
+ safety: true,
+ validity: true,
+ });
+ }
+ };
let get_field = |name| {
let (field_idx, _) = variant
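Illustrative sketch (not part of the patch): the hunk above replaces an `unwrap` with a "fail closed" fallback, so that an unreadable `Assume` constant is treated as if every check were required. The toy `read_assume` below shows the same pattern with invented types.

    // Toy model of the conservative fallback: if the configuration value
    // cannot be read, assume the most restrictive settings.
    #[derive(Debug, Clone, Copy)]
    struct Assume {
        alignment: bool,
        lifetimes: bool,
        safety: bool,
        validity: bool,
    }

    impl Assume {
        const CONSERVATIVE: Self =
            Self { alignment: true, lifetimes: true, safety: true, validity: true };
    }

    fn read_assume(fields: Option<&[bool]>) -> Assume {
        match fields {
            Some(&[alignment, lifetimes, safety, validity]) => {
                Assume { alignment, lifetimes, safety, validity }
            }
            // Unreadable or malformed input: behave as if every check is required.
            _ => Assume::CONSERVATIVE,
        }
    }

    fn main() {
        println!("{:?}", read_assume(Some(&[false, false, true, false])));
        println!("{:?}", read_assume(None));
    }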
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 55484f5c7..4d0b84753 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -9,6 +9,7 @@ use rustc_session::config::OptLevel;
use rustc_span::def_id::DefId;
use rustc_target::abi::call::{
ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
+ RiscvInterruptKind,
};
use rustc_target::abi::*;
use rustc_target::spec::abi::Abi as SpecAbi;
@@ -51,12 +52,12 @@ fn fn_sig_for_fn_abi<'tcx>(
//
// We normalize the `fn_sig` again after substituting at a later point.
let mut sig = match *ty.kind() {
- ty::FnDef(def_id, substs) => tcx
+ ty::FnDef(def_id, args) => tcx
.fn_sig(def_id)
.map_bound(|fn_sig| {
tcx.normalize_erasing_regions(tcx.param_env(def_id), fn_sig)
})
- .subst(tcx, substs),
+ .instantiate(tcx, args),
_ => unreachable!(),
};
@@ -71,8 +72,8 @@ fn fn_sig_for_fn_abi<'tcx>(
}
sig
}
- ty::Closure(def_id, substs) => {
- let sig = substs.as_closure().sig();
+ ty::Closure(def_id, args) => {
+ let sig = args.as_closure().sig();
let bound_vars = tcx.mk_bound_variable_kinds_from_iter(
sig.bound_vars().iter().chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
@@ -82,7 +83,7 @@ fn fn_sig_for_fn_abi<'tcx>(
kind: ty::BoundRegionKind::BrEnv,
};
let env_region = ty::Region::new_late_bound(tcx, ty::INNERMOST, br);
- let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
+ let env_ty = tcx.closure_env_ty(def_id, args, env_region).unwrap();
let sig = sig.skip_binder();
ty::Binder::bind_with_vars(
@@ -96,8 +97,8 @@ fn fn_sig_for_fn_abi<'tcx>(
bound_vars,
)
}
- ty::Generator(did, substs, _) => {
- let sig = substs.as_generator().poly_sig();
+ ty::Generator(did, args, _) => {
+ let sig = args.as_generator().poly_sig();
let bound_vars = tcx.mk_bound_variable_kinds_from_iter(
sig.bound_vars().iter().chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
@@ -111,8 +112,8 @@ fn fn_sig_for_fn_abi<'tcx>(
let pin_did = tcx.require_lang_item(LangItem::Pin, None);
let pin_adt_ref = tcx.adt_def(pin_did);
- let pin_substs = tcx.mk_substs(&[env_ty.into()]);
- let env_ty = Ty::new_adt(tcx, pin_adt_ref, pin_substs);
+ let pin_args = tcx.mk_args(&[env_ty.into()]);
+ let env_ty = Ty::new_adt(tcx, pin_adt_ref, pin_args);
let sig = sig.skip_binder();
// The `FnSig` and the `ret_ty` here are for a generator's main
@@ -123,8 +124,8 @@ fn fn_sig_for_fn_abi<'tcx>(
// The signature should be `Future::poll(_, &mut Context<'_>) -> Poll<Output>`
let poll_did = tcx.require_lang_item(LangItem::Poll, None);
let poll_adt_ref = tcx.adt_def(poll_did);
- let poll_substs = tcx.mk_substs(&[sig.return_ty.into()]);
- let ret_ty = Ty::new_adt(tcx, poll_adt_ref, poll_substs);
+ let poll_args = tcx.mk_args(&[sig.return_ty.into()]);
+ let ret_ty = Ty::new_adt(tcx, poll_adt_ref, poll_args);
// We have to replace the `ResumeTy` that is used for type and borrow checking
// with `&mut Context<'_>` which is used in codegen.
@@ -145,8 +146,8 @@ fn fn_sig_for_fn_abi<'tcx>(
// The signature should be `Generator::resume(_, Resume) -> GeneratorState<Yield, Return>`
let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
let state_adt_ref = tcx.adt_def(state_did);
- let state_substs = tcx.mk_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
- let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_substs);
+ let state_args = tcx.mk_args(&[sig.yield_ty.into(), sig.return_ty.into()]);
+ let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_args);
(sig.resume_ty, ret_ty)
};
@@ -193,6 +194,8 @@ fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
AmdGpuKernel => Conv::AmdGpuKernel,
AvrInterrupt => Conv::AvrInterrupt,
AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
+ RiscvInterruptM => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine },
+ RiscvInterruptS => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor },
Wasm => Conv::C,
// These API constants ought to be more specific...
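Illustrative sketch (not part of the patch): a toy mirror of the `conv_from_spec_abi` mapping extended above, including the new RISC-V machine/supervisor interrupt conventions. The enums here are invented stand-ins, not rustc_target's real types.

    #[allow(dead_code)]
    #[derive(Debug)]
    enum SpecAbi { C, Wasm, AvrInterrupt, RiscvInterruptM, RiscvInterruptS }

    #[derive(Debug)]
    enum RiscvInterruptKind { Machine, Supervisor }

    #[derive(Debug)]
    enum Conv { C, AvrInterrupt, RiscvInterrupt { kind: RiscvInterruptKind } }

    fn conv_from_spec_abi(abi: SpecAbi) -> Conv {
        match abi {
            SpecAbi::C | SpecAbi::Wasm => Conv::C,
            SpecAbi::AvrInterrupt => Conv::AvrInterrupt,
            SpecAbi::RiscvInterruptM => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine },
            SpecAbi::RiscvInterruptS => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor },
        }
    }

    fn main() {
        println!("{:?}", conv_from_spec_abi(SpecAbi::RiscvInterruptM));
        println!("{:?}", conv_from_spec_abi(SpecAbi::RiscvInterruptS));
    }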
diff --git a/compiler/rustc_ty_utils/src/assoc.rs b/compiler/rustc_ty_utils/src/assoc.rs
index 897e7aad4..780f7ea42 100644
--- a/compiler/rustc_ty_utils/src/assoc.rs
+++ b/compiler/rustc_ty_utils/src/assoc.rs
@@ -5,7 +5,7 @@ use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId};
use rustc_hir::definitions::DefPathData;
use rustc_hir::intravisit::{self, Visitor};
use rustc_middle::query::Providers;
-use rustc_middle::ty::{self, ImplTraitInTraitData, InternalSubsts, Ty, TyCtxt};
+use rustc_middle::ty::{self, GenericArgs, ImplTraitInTraitData, Ty, TyCtxt};
use rustc_span::symbol::kw;
pub fn provide(providers: &mut Providers) {
@@ -24,70 +24,54 @@ fn associated_item_def_ids(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &[DefId] {
let item = tcx.hir().expect_item(def_id);
match item.kind {
hir::ItemKind::Trait(.., ref trait_item_refs) => {
- if tcx.lower_impl_trait_in_trait_to_assoc_ty() {
- // We collect RPITITs for each trait method's return type and create a
- // corresponding associated item using associated_types_for_impl_traits_in_associated_fn
- // query.
- tcx.arena.alloc_from_iter(
- trait_item_refs
- .iter()
- .map(|trait_item_ref| trait_item_ref.id.owner_id.to_def_id())
- .chain(
- trait_item_refs
- .iter()
- .filter(|trait_item_ref| {
- matches!(trait_item_ref.kind, hir::AssocItemKind::Fn { .. })
- })
- .flat_map(|trait_item_ref| {
- let trait_fn_def_id =
- trait_item_ref.id.owner_id.def_id.to_def_id();
- tcx.associated_types_for_impl_traits_in_associated_fn(
- trait_fn_def_id,
- )
- })
- .map(|def_id| *def_id),
- ),
- )
- } else {
- tcx.arena.alloc_from_iter(
- trait_item_refs
- .iter()
- .map(|trait_item_ref| trait_item_ref.id.owner_id.to_def_id()),
- )
- }
+ // We collect RPITITs for each trait method's return type and create a
+ // corresponding associated item using associated_types_for_impl_traits_in_associated_fn
+ // query.
+ tcx.arena.alloc_from_iter(
+ trait_item_refs
+ .iter()
+ .map(|trait_item_ref| trait_item_ref.id.owner_id.to_def_id())
+ .chain(
+ trait_item_refs
+ .iter()
+ .filter(|trait_item_ref| {
+ matches!(trait_item_ref.kind, hir::AssocItemKind::Fn { .. })
+ })
+ .flat_map(|trait_item_ref| {
+ let trait_fn_def_id = trait_item_ref.id.owner_id.def_id.to_def_id();
+ tcx.associated_types_for_impl_traits_in_associated_fn(
+ trait_fn_def_id,
+ )
+ })
+ .map(|def_id| *def_id),
+ ),
+ )
}
hir::ItemKind::Impl(ref impl_) => {
- if tcx.lower_impl_trait_in_trait_to_assoc_ty() {
- // We collect RPITITs for each trait method's return type, on the impl side too and
- // create a corresponding associated item using
- // associated_types_for_impl_traits_in_associated_fn query.
- tcx.arena.alloc_from_iter(
- impl_
- .items
- .iter()
- .map(|impl_item_ref| impl_item_ref.id.owner_id.to_def_id())
- .chain(impl_.of_trait.iter().flat_map(|_| {
- impl_
- .items
- .iter()
- .filter(|impl_item_ref| {
- matches!(impl_item_ref.kind, hir::AssocItemKind::Fn { .. })
- })
- .flat_map(|impl_item_ref| {
- let impl_fn_def_id =
- impl_item_ref.id.owner_id.def_id.to_def_id();
- tcx.associated_types_for_impl_traits_in_associated_fn(
- impl_fn_def_id,
- )
- })
- .map(|def_id| *def_id)
- })),
- )
- } else {
- tcx.arena.alloc_from_iter(
- impl_.items.iter().map(|impl_item_ref| impl_item_ref.id.owner_id.to_def_id()),
- )
- }
+ // We collect RPITITs for each trait method's return type, on the impl side too and
+ // create a corresponding associated item using
+ // associated_types_for_impl_traits_in_associated_fn query.
+ tcx.arena.alloc_from_iter(
+ impl_
+ .items
+ .iter()
+ .map(|impl_item_ref| impl_item_ref.id.owner_id.to_def_id())
+ .chain(impl_.of_trait.iter().flat_map(|_| {
+ impl_
+ .items
+ .iter()
+ .filter(|impl_item_ref| {
+ matches!(impl_item_ref.kind, hir::AssocItemKind::Fn { .. })
+ })
+ .flat_map(|impl_item_ref| {
+ let impl_fn_def_id = impl_item_ref.id.owner_id.def_id.to_def_id();
+ tcx.associated_types_for_impl_traits_in_associated_fn(
+ impl_fn_def_id,
+ )
+ })
+ .map(|def_id| *def_id)
+ })),
+ )
}
_ => span_bug!(item.span, "associated_item_def_ids: not impl or trait"),
}
@@ -231,7 +215,9 @@ fn associated_types_for_impl_traits_in_associated_fn(
}
DefKind::Impl { .. } => {
- let Some(trait_fn_def_id) = tcx.associated_item(fn_def_id).trait_item_def_id else { return &[] };
+ let Some(trait_fn_def_id) = tcx.associated_item(fn_def_id).trait_item_def_id else {
+ return &[];
+ };
tcx.arena.alloc_from_iter(
tcx.associated_types_for_impl_traits_in_associated_fn(trait_fn_def_id).iter().map(
@@ -304,7 +290,7 @@ fn associated_type_for_impl_trait_in_trait(
trait_assoc_ty.type_of(ty::EarlyBinder::bind(Ty::new_opaque(
tcx,
opaque_ty_def_id.to_def_id(),
- InternalSubsts::identity_for_item(tcx, opaque_ty_def_id),
+ GenericArgs::identity_for_item(tcx, opaque_ty_def_id),
)));
trait_assoc_ty.is_type_alias_impl_trait(false);
@@ -360,8 +346,16 @@ fn associated_type_for_impl_trait_in_impl(
) -> LocalDefId {
let impl_local_def_id = tcx.local_parent(impl_fn_def_id);
- // FIXME fix the span, we probably want the def_id of the return type of the function
- let span = tcx.def_span(impl_fn_def_id);
+ let decl = tcx
+ .hir()
+ .find_by_def_id(impl_fn_def_id)
+ .expect("expected item")
+ .fn_decl()
+ .expect("expected decl");
+ let span = match decl.output {
+ hir::FnRetTy::DefaultReturn(_) => tcx.def_span(impl_fn_def_id),
+ hir::FnRetTy::Return(ty) => ty.span,
+ };
let impl_assoc_ty = tcx.at(span).create_def(impl_local_def_id, DefPathData::ImplTraitAssocTy);
let local_def_id = impl_assoc_ty.def_id();
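Illustrative sketch (not part of the patch): the surface feature whose synthetic associated items the code above creates. A return-position `impl Trait` in a trait (RPITIT) behaves like a hidden associated type that each impl chooses; this example assumes Rust 1.75+, where the feature is stable.

    trait Counter {
        fn counts(&self) -> impl Iterator<Item = u32>;
    }

    struct UpTo(u32);

    impl Counter for UpTo {
        // The concrete hidden type here is `std::ops::Range<u32>`; the compiler
        // records it through a synthetic associated type much like the ones
        // created by `associated_type_for_impl_trait_in_impl` above.
        fn counts(&self) -> impl Iterator<Item = u32> {
            0..self.0
        }
    }

    fn main() {
        let total: u32 = UpTo(4).counts().sum();
        assert_eq!(total, 6);
    }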
diff --git a/compiler/rustc_ty_utils/src/consts.rs b/compiler/rustc_ty_utils/src/consts.rs
index 426c98012..383cc996b 100644
--- a/compiler/rustc_ty_utils/src/consts.rs
+++ b/compiler/rustc_ty_utils/src/consts.rs
@@ -42,7 +42,7 @@ pub(crate) fn destructure_const<'tcx>(
(field_consts, None)
}
ty::Adt(def, _) if def.variants().is_empty() => bug!("unreachable"),
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
let (variant_idx, branches) = if def.is_enum() {
let (head, rest) = branches.split_first().unwrap();
(VariantIdx::from_u32(head.unwrap_leaf().try_to_u32().unwrap()), rest)
@@ -53,7 +53,7 @@ pub(crate) fn destructure_const<'tcx>(
let mut field_consts = Vec::with_capacity(fields.len());
for (field, field_valtree) in iter::zip(fields, branches) {
- let field_ty = field.ty(tcx, substs);
+ let field_ty = field.ty(tcx, args);
let field_const = ty::Const::new_value(tcx, *field_valtree, field_ty);
field_consts.push(field_const);
}
@@ -133,8 +133,8 @@ fn recurse_build<'tcx>(
let val = ty::ValTree::zst();
ty::Const::new_value(tcx, val, node.ty)
}
- &ExprKind::NamedConst { def_id, substs, user_ty: _ } => {
- let uneval = ty::UnevaluatedConst::new(def_id, substs);
+ &ExprKind::NamedConst { def_id, args, user_ty: _ } => {
+ let uneval = ty::UnevaluatedConst::new(def_id, args);
ty::Const::new_unevaluated(tcx, uneval, node.ty)
}
ExprKind::ConstParam { param, .. } => ty::Const::new_param(tcx, *param, node.ty),
@@ -174,7 +174,7 @@ fn recurse_build<'tcx>(
}
// `ExprKind::Use` happens when a `hir::ExprKind::Cast` is a
// "coercion cast" i.e. using a coercion or is a no-op.
- // This is important so that `N as usize as usize` doesnt unify with `N as usize`. (untested)
+ // This is important so that `N as usize as usize` doesn't unify with `N as usize`. (untested)
&ExprKind::Use { source } => {
let arg = recurse_build(tcx, body, source, root_span)?;
ty::Const::new_expr(tcx, Expr::Cast(CastKind::Use, arg, node.ty), node.ty)
@@ -306,8 +306,9 @@ impl<'a, 'tcx> IsThirPolymorphic<'a, 'tcx> {
}
match expr.kind {
- thir::ExprKind::NamedConst { substs, .. }
- | thir::ExprKind::ConstBlock { substs, .. } => substs.has_non_region_param(),
+ thir::ExprKind::NamedConst { args, .. } | thir::ExprKind::ConstBlock { args, .. } => {
+ args.has_non_region_param()
+ }
thir::ExprKind::ConstParam { .. } => true,
thir::ExprKind::Repeat { value, count } => {
self.visit_expr(&self.thir()[value]);
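Illustrative sketch (not part of the patch): a stable-Rust look at the kinds of constants this file evaluates and destructures — structured const values and const generic parameters. The `generic_const_exprs` cases mentioned in the comments (e.g. `N + 1` used in a type) are nightly-only and not shown.

    // Constants used in types must be evaluated (and sometimes destructured
    // field by field) at compile time.
    #[derive(Debug, PartialEq)]
    struct Config {
        retries: u8,
        verbose: bool,
    }

    const DEFAULT: Config = Config { retries: 3, verbose: false };

    fn buffer<const N: usize>() -> [u8; N] {
        [0; N]
    }

    fn main() {
        // `DEFAULT` is a structured constant: conceptually a branch of leaves
        // (3, false), similar to the valtree branches walked above.
        assert_eq!(DEFAULT, Config { retries: 3, verbose: false });

        // `N` stays an unevaluated const parameter until instantiation picks 16.
        let buf = buffer::<16>();
        assert_eq!(buf.len(), 16);
    }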
diff --git a/compiler/rustc_ty_utils/src/implied_bounds.rs b/compiler/rustc_ty_utils/src/implied_bounds.rs
index 10dec9a7a..436f10a4f 100644
--- a/compiler/rustc_ty_utils/src/implied_bounds.rs
+++ b/compiler/rustc_ty_utils/src/implied_bounds.rs
@@ -1,3 +1,4 @@
+use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::LocalDefId;
@@ -7,13 +8,20 @@ use rustc_span::Span;
use std::iter;
pub fn provide(providers: &mut Providers) {
- *providers = Providers { assumed_wf_types, ..*providers };
+ *providers = Providers {
+ assumed_wf_types,
+ assumed_wf_types_for_rpitit: |tcx, def_id| {
+ assert!(tcx.is_impl_trait_in_trait(def_id.to_def_id()));
+ tcx.assumed_wf_types(def_id)
+ },
+ ..*providers
+ };
}
fn assumed_wf_types<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx [(Ty<'tcx>, Span)] {
match tcx.def_kind(def_id) {
DefKind::Fn => {
- let sig = tcx.fn_sig(def_id).subst_identity();
+ let sig = tcx.fn_sig(def_id).instantiate_identity();
let liberated_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), sig);
tcx.arena.alloc_from_iter(itertools::zip_eq(
liberated_sig.inputs_and_output,
@@ -21,7 +29,7 @@ fn assumed_wf_types<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx [(Ty<'
))
}
DefKind::AssocFn => {
- let sig = tcx.fn_sig(def_id).subst_identity();
+ let sig = tcx.fn_sig(def_id).instantiate_identity();
let liberated_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), sig);
let mut assumed_wf_types: Vec<_> =
tcx.assumed_wf_types(tcx.local_parent(def_id)).into();
@@ -35,23 +43,90 @@ fn assumed_wf_types<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx [(Ty<'
// Trait arguments and the self type for trait impls or only the self type for
// inherent impls.
let tys = match tcx.impl_trait_ref(def_id) {
- Some(trait_ref) => trait_ref.skip_binder().substs.types().collect(),
- None => vec![tcx.type_of(def_id).subst_identity()],
+ Some(trait_ref) => trait_ref.skip_binder().args.types().collect(),
+ None => vec![tcx.type_of(def_id).instantiate_identity()],
};
let mut impl_spans = impl_spans(tcx, def_id);
tcx.arena.alloc_from_iter(tys.into_iter().map(|ty| (ty, impl_spans.next().unwrap())))
}
+ DefKind::AssocTy if let Some(data) = tcx.opt_rpitit_info(def_id.to_def_id()) => match data {
+ ty::ImplTraitInTraitData::Trait { fn_def_id, .. } => {
+            // We need to remap all of the late-bound lifetimes in the assumed wf types
+ // of the fn (which are represented as ReFree) to the early-bound lifetimes
+ // of the RPITIT (which are represented by ReEarlyBound owned by the opaque).
+ // Luckily, this is very easy to do because we already have that mapping
+ // stored in the HIR of this RPITIT.
+ //
+ // Side-note: We don't really need to do this remapping for early-bound
+ // lifetimes because they're already "linked" by the bidirectional outlives
+ // predicates we insert in the `explicit_predicates_of` query for RPITITs.
+ let mut mapping = FxHashMap::default();
+ let generics = tcx.generics_of(def_id);
+
+ // For each captured opaque lifetime, if it's late-bound (`ReFree` in this case,
+ // since it has been liberated), map it back to the early-bound lifetime of
+ // the GAT. Since RPITITs also have all of the fn's generics, we slice only
+ // the end of the list corresponding to the opaque's generics.
+ for param in &generics.params[tcx.generics_of(fn_def_id).params.len()..] {
+ let orig_lt = tcx.map_rpit_lifetime_to_fn_lifetime(param.def_id.expect_local());
+ if matches!(*orig_lt, ty::ReFree(..)) {
+ mapping.insert(
+ orig_lt,
+ ty::Region::new_early_bound(
+ tcx,
+ ty::EarlyBoundRegion {
+ def_id: param.def_id,
+ index: param.index,
+ name: param.name,
+ },
+ ),
+ );
+ }
+ }
+ // FIXME: This could use a real folder, I guess.
+ let remapped_wf_tys = tcx.fold_regions(
+ tcx.assumed_wf_types(fn_def_id.expect_local()).to_vec(),
+ |region, _| {
+ // If `region` is a `ReFree` that is captured by the
+                        // opaque, remap it to its corresponding early-
+ // bound region.
+ if let Some(remapped_region) = mapping.get(&region) {
+ *remapped_region
+ } else {
+ region
+ }
+ },
+ );
+ tcx.arena.alloc_from_iter(remapped_wf_tys)
+ }
+ // Assumed wf types for RPITITs in an impl just inherit (and instantiate)
+ // the assumed wf types of the trait's RPITIT GAT.
+ ty::ImplTraitInTraitData::Impl { .. } => {
+ let impl_def_id = tcx.local_parent(def_id);
+ let rpitit_def_id = tcx.associated_item(def_id).trait_item_def_id.unwrap();
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id).rebase_onto(
+ tcx,
+ impl_def_id.to_def_id(),
+ tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity().args,
+ );
+ tcx.arena.alloc_from_iter(
+ ty::EarlyBinder::bind(tcx.assumed_wf_types_for_rpitit(rpitit_def_id))
+ .iter_instantiated_copied(tcx, args)
+ .chain(tcx.assumed_wf_types(impl_def_id).into_iter().copied()),
+ )
+ }
+ },
DefKind::AssocConst | DefKind::AssocTy => tcx.assumed_wf_types(tcx.local_parent(def_id)),
DefKind::OpaqueTy => match tcx.def_kind(tcx.local_parent(def_id)) {
- DefKind::TyAlias => ty::List::empty(),
+ DefKind::TyAlias { .. } => ty::List::empty(),
DefKind::AssocTy => tcx.assumed_wf_types(tcx.local_parent(def_id)),
// Nested opaque types only occur in associated types:
// ` type Opaque<T> = impl Trait<&'static T, AssocTy = impl Nested>; `
// assumed_wf_types should include those of `Opaque<T>`, `Opaque<T>` itself
// and `&'static T`.
DefKind::OpaqueTy => bug!("unimplemented implied bounds for nested opaque types"),
- def_kind @ _ => {
+ def_kind => {
bug!("unimplemented implied bounds for opaque types with parent {def_kind:?}")
}
},
@@ -61,7 +136,7 @@ fn assumed_wf_types<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx [(Ty<'
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias
+ | DefKind::TyAlias { .. }
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::TyParam
@@ -75,7 +150,6 @@ fn assumed_wf_types<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx [(Ty<'
| DefKind::ForeignMod
| DefKind::AnonConst
| DefKind::InlineConst
- | DefKind::ImplTraitPlaceholder
| DefKind::Field
| DefKind::LifetimeParam
| DefKind::GlobalAsm
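Illustrative sketch (not part of the patch): the "assumed well-formed types" machinery above is what lets signatures like the ones below compile without spelling out outlives bounds. Because `&'a [T]` and `&'a T` appear in the signatures, the compiler may assume they are well-formed, which implies `T: 'a`.

    // Implied bounds in action: no explicit `where T: 'a` is needed.
    fn first<'a, T>(items: &'a [T]) -> Option<&'a T> {
        items.first()
    }

    struct Wrapper<'a, T>(&'a T); // the struct definition itself implies T: 'a

    fn wrap<'a, T>(value: &'a T) -> Wrapper<'a, T> {
        Wrapper(value)
    }

    fn main() {
        let data = vec![1, 2, 3];
        assert_eq!(first(&data), Some(&1));
        let x = 5;
        let w = wrap(&x);
        assert_eq!(*w.0, 5);
    }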
diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs
index 1d93a79e5..e1a15b5cf 100644
--- a/compiler/rustc_ty_utils/src/instance.rs
+++ b/compiler/rustc_ty_utils/src/instance.rs
@@ -1,69 +1,64 @@
use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::query::Providers;
-use rustc_middle::traits::CodegenObligationError;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::traits::{BuiltinImplSource, CodegenObligationError};
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Instance, TyCtxt, TypeVisitableExt};
use rustc_span::sym;
use rustc_trait_selection::traits;
-use traits::{translate_substs, Reveal};
+use traits::{translate_args, Reveal};
use crate::errors::UnexpectedFnPtrAssociatedItem;
fn resolve_instance<'tcx>(
tcx: TyCtxt<'tcx>,
- key: ty::ParamEnvAnd<'tcx, (DefId, SubstsRef<'tcx>)>,
+ key: ty::ParamEnvAnd<'tcx, (DefId, GenericArgsRef<'tcx>)>,
) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
- let (param_env, (def, substs)) = key.into_parts();
+ let (param_env, (def_id, args)) = key.into_parts();
- let result = if let Some(trait_def_id) = tcx.trait_of_item(def) {
+ let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) {
debug!(" => associated item, attempting to find impl in param_env {:#?}", param_env);
resolve_associated_item(
tcx,
- def,
+ def_id,
param_env,
trait_def_id,
- tcx.normalize_erasing_regions(param_env, substs),
+ tcx.normalize_erasing_regions(param_env, args),
)
} else {
- let ty = tcx.type_of(def);
- let item_type = tcx.subst_and_normalize_erasing_regions(substs, param_env, ty);
+ let def = if matches!(tcx.def_kind(def_id), DefKind::Fn) && tcx.is_intrinsic(def_id) {
+ debug!(" => intrinsic");
+ ty::InstanceDef::Intrinsic(def_id)
+ } else if Some(def_id) == tcx.lang_items().drop_in_place_fn() {
+ let ty = args.type_at(0);
- let def = match *item_type.kind() {
- ty::FnDef(def_id, ..) if tcx.is_intrinsic(def_id) => {
- debug!(" => intrinsic");
- ty::InstanceDef::Intrinsic(def)
- }
- ty::FnDef(def_id, substs) if Some(def_id) == tcx.lang_items().drop_in_place_fn() => {
- let ty = substs.type_at(0);
-
- if ty.needs_drop(tcx, param_env) {
- debug!(" => nontrivial drop glue");
- match *ty.kind() {
- ty::Closure(..)
- | ty::Generator(..)
- | ty::Tuple(..)
- | ty::Adt(..)
- | ty::Dynamic(..)
- | ty::Array(..)
- | ty::Slice(..) => {}
- // Drop shims can only be built from ADTs.
- _ => return Ok(None),
- }
-
- ty::InstanceDef::DropGlue(def_id, Some(ty))
- } else {
- debug!(" => trivial drop glue");
- ty::InstanceDef::DropGlue(def_id, None)
+ if ty.needs_drop(tcx, param_env) {
+ debug!(" => nontrivial drop glue");
+ match *ty.kind() {
+ ty::Closure(..)
+ | ty::Generator(..)
+ | ty::Tuple(..)
+ | ty::Adt(..)
+ | ty::Dynamic(..)
+ | ty::Array(..)
+ | ty::Slice(..) => {}
+ // Drop shims can only be built from ADTs.
+ _ => return Ok(None),
}
+
+ ty::InstanceDef::DropGlue(def_id, Some(ty))
+ } else {
+ debug!(" => trivial drop glue");
+ ty::InstanceDef::DropGlue(def_id, None)
}
- _ => {
- debug!(" => free item");
- ty::InstanceDef::Item(def)
- }
+ } else {
+ debug!(" => free item");
+ ty::InstanceDef::Item(def_id)
};
- Ok(Some(Instance { def, substs }))
+
+ Ok(Some(Instance { def, args }))
};
debug!("inner_resolve_instance: result={:?}", result);
result
@@ -74,11 +69,11 @@ fn resolve_associated_item<'tcx>(
trait_item_id: DefId,
param_env: ty::ParamEnv<'tcx>,
trait_id: DefId,
- rcvr_substs: SubstsRef<'tcx>,
+ rcvr_args: GenericArgsRef<'tcx>,
) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
- debug!(?trait_item_id, ?param_env, ?trait_id, ?rcvr_substs, "resolve_associated_item");
+ debug!(?trait_item_id, ?param_env, ?trait_id, ?rcvr_args, "resolve_associated_item");
- let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
+ let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_args);
let vtbl = match tcx.codegen_select_candidate((param_env, trait_ref)) {
Ok(vtbl) => vtbl,
@@ -102,9 +97,9 @@ fn resolve_associated_item<'tcx>(
traits::ImplSource::UserDefined(impl_data) => {
debug!(
"resolving ImplSource::UserDefined: {:?}, {:?}, {:?}, {:?}",
- param_env, trait_item_id, rcvr_substs, impl_data
+ param_env, trait_item_id, rcvr_args, impl_data
);
- assert!(!rcvr_substs.has_infer());
+ assert!(!rcvr_args.has_infer());
assert!(!trait_ref.has_infer());
let trait_def_id = tcx.trait_id_of_impl(impl_data.impl_def_id).unwrap();
@@ -117,15 +112,15 @@ fn resolve_associated_item<'tcx>(
});
let infcx = tcx.infer_ctxt().build();
let param_env = param_env.with_reveal_all_normalized(tcx);
- let substs = rcvr_substs.rebase_onto(tcx, trait_def_id, impl_data.substs);
- let substs = translate_substs(
+ let args = rcvr_args.rebase_onto(tcx, trait_def_id, impl_data.args);
+ let args = translate_args(
&infcx,
param_env,
impl_data.impl_def_id,
- substs,
+ args,
leaf_def.defining_node,
);
- let substs = infcx.tcx.erase_regions(substs);
+ let args = infcx.tcx.erase_regions(args);
// Since this is a trait item, we need to see if the item is either a trait default item
// or a specialization because we can't resolve those unless we can `Reveal::All`.
@@ -159,7 +154,7 @@ fn resolve_associated_item<'tcx>(
return Err(guard);
}
- let substs = tcx.erase_regions(substs);
+ let args = tcx.erase_regions(args);
// Check if we just resolved an associated `const` declaration from
// a `trait` to an associated `const` definition in an `impl`, where
@@ -175,17 +170,17 @@ fn resolve_associated_item<'tcx>(
))?;
}
- Some(ty::Instance::new(leaf_def.item.def_id, substs))
+ Some(ty::Instance::new(leaf_def.item.def_id, args))
}
- traits::ImplSource::Object(ref data) => {
- traits::get_vtable_index_of_object_method(tcx, data, trait_item_id).map(|index| {
- Instance {
+ traits::ImplSource::Builtin(BuiltinImplSource::Object { vtable_base }, _) => {
+ traits::get_vtable_index_of_object_method(tcx, *vtable_base, trait_item_id).map(
+ |index| Instance {
def: ty::InstanceDef::Virtual(trait_item_id, index),
- substs: rcvr_substs,
- }
- })
+ args: rcvr_args,
+ },
+ )
}
- traits::ImplSource::Builtin(..) => {
+ traits::ImplSource::Builtin(BuiltinImplSource::Misc, _) => {
let lang_items = tcx.lang_items();
if Some(trait_ref.def_id) == lang_items.clone_trait() {
// FIXME(eddyb) use lang items for methods instead of names.
@@ -205,14 +200,14 @@ fn resolve_associated_item<'tcx>(
Some(Instance {
def: ty::InstanceDef::CloneShim(trait_item_id, self_ty),
- substs: rcvr_substs,
+ args: rcvr_args,
})
} else {
assert_eq!(name, sym::clone_from);
// Use the default `fn clone_from` from `trait Clone`.
- let substs = tcx.erase_regions(rcvr_substs);
- Some(ty::Instance::new(trait_item_id, substs))
+ let args = tcx.erase_regions(rcvr_args);
+ Some(ty::Instance::new(trait_item_id, args))
}
} else if Some(trait_ref.def_id) == lang_items.fn_ptr_trait() {
if lang_items.fn_ptr_addr() == Some(trait_item_id) {
@@ -222,7 +217,7 @@ fn resolve_associated_item<'tcx>(
}
Some(Instance {
def: ty::InstanceDef::FnPtrAddrShim(trait_item_id, self_ty),
- substs: rcvr_substs,
+ args: rcvr_args,
})
} else {
tcx.sess.emit_fatal(UnexpectedFnPtrAssociatedItem {
@@ -230,20 +225,20 @@ fn resolve_associated_item<'tcx>(
})
}
} else if Some(trait_ref.def_id) == lang_items.future_trait() {
- let ty::Generator(generator_def_id, substs, _) = *rcvr_substs.type_at(0).kind() else {
+ let ty::Generator(generator_def_id, args, _) = *rcvr_args.type_at(0).kind() else {
bug!()
};
if Some(trait_item_id) == tcx.lang_items().future_poll_fn() {
// `Future::poll` is generated by the compiler.
- Some(Instance { def: ty::InstanceDef::Item(generator_def_id), substs: substs })
+ Some(Instance { def: ty::InstanceDef::Item(generator_def_id), args: args })
} else {
// All other methods are default methods of the `Future` trait.
// (this assumes that `ImplSource::Builtin` is only used for methods on `Future`)
debug_assert!(tcx.defaultness(trait_item_id).has_value());
- Some(Instance::new(trait_item_id, rcvr_substs))
+ Some(Instance::new(trait_item_id, rcvr_args))
}
} else if Some(trait_ref.def_id) == lang_items.gen_trait() {
- let ty::Generator(generator_def_id, substs, _) = *rcvr_substs.type_at(0).kind() else {
+ let ty::Generator(generator_def_id, args, _) = *rcvr_args.type_at(0).kind() else {
bug!()
};
if cfg!(debug_assertions) && tcx.item_name(trait_item_id) != sym::resume {
@@ -257,7 +252,7 @@ fn resolve_associated_item<'tcx>(
tcx.item_name(trait_item_id)
)
}
- Some(Instance { def: ty::InstanceDef::Item(generator_def_id), substs })
+ Some(Instance { def: ty::InstanceDef::Item(generator_def_id), args })
} else if tcx.fn_trait_kind_from_def_id(trait_ref.def_id).is_some() {
// FIXME: This doesn't check for malformed libcore that defines, e.g.,
// `trait Fn { fn call_once(&self) { .. } }`. This is mostly for extension
@@ -275,14 +270,14 @@ fn resolve_associated_item<'tcx>(
tcx.item_name(trait_item_id)
)
}
- match *rcvr_substs.type_at(0).kind() {
- ty::Closure(closure_def_id, substs) => {
+ match *rcvr_args.type_at(0).kind() {
+ ty::Closure(closure_def_id, args) => {
let trait_closure_kind = tcx.fn_trait_kind_from_def_id(trait_id).unwrap();
- Instance::resolve_closure(tcx, closure_def_id, substs, trait_closure_kind)
+ Instance::resolve_closure(tcx, closure_def_id, args, trait_closure_kind)
}
ty::FnDef(..) | ty::FnPtr(..) => Some(Instance {
- def: ty::InstanceDef::FnPtrShim(trait_item_id, rcvr_substs.type_at(0)),
- substs: rcvr_substs,
+ def: ty::InstanceDef::FnPtrShim(trait_item_id, rcvr_args.type_at(0)),
+ args: rcvr_args,
}),
_ => bug!(
"no built-in definition for `{trait_ref}::{}` for non-fn type",
@@ -293,7 +288,9 @@ fn resolve_associated_item<'tcx>(
None
}
}
- traits::ImplSource::Param(..) | traits::ImplSource::TraitUpcasting(_) => None,
+ traits::ImplSource::Param(..)
+ | traits::ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { .. }, _)
+ | traits::ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, _) => None,
})
}
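Illustrative sketch (not part of the patch): the distinction the `ImplSource` match above draws between resolving a trait method to a concrete impl item and resolving it to a vtable slot, shown from the surface language. The names below are invented for the example.

    trait Speak {
        fn speak(&self) -> String;
    }

    struct Dog;
    impl Speak for Dog {
        fn speak(&self) -> String { "woof".to_string() }
    }

    // Monomorphized: the compiler resolves `S::speak` to `Dog::speak` at
    // compile time, roughly the `ImplSource::UserDefined` arm above.
    fn static_call<S: Speak>(s: &S) -> String {
        s.speak()
    }

    // Dynamic: the call goes through a vtable slot, roughly the
    // `BuiltinImplSource::Object { vtable_base }` arm above.
    fn dynamic_call(s: &dyn Speak) -> String {
        s.speak()
    }

    fn main() {
        let d = Dog;
        assert_eq!(static_call(&d), "woof");
        assert_eq!(dynamic_call(&d), "woof");
    }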
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index b67cd96a7..6b4273c03 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -8,7 +8,7 @@ use rustc_middle::ty::layout::{
IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
};
use rustc_middle::ty::{
- self, subst::SubstsRef, AdtDef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitableExt,
+ self, AdtDef, EarlyBinder, GenericArgsRef, ReprOptions, Ty, TyCtxt, TypeVisitableExt,
};
use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
@@ -96,6 +96,13 @@ fn layout_of_uncached<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
+ // Types that reference `ty::Error` pessimistically don't have a meaningful layout.
+ // The only side-effect of this is possibly worse diagnostics in case the layout
+ // was actually computable (like if the `ty::Error` showed up only in a `PhantomData`).
+ if let Err(guar) = ty.error_reported() {
+ return Err(error(cx, LayoutError::ReferencesError(guar)));
+ }
+
let tcx = cx.tcx;
let param_env = cx.param_env;
let dl = cx.data_layout();
@@ -258,6 +265,8 @@ fn layout_of_uncached<'tcx>(
largest_niche,
align: element.align,
size,
+ max_repr_align: None,
+ unadjusted_abi_align: element.align.abi,
})
}
ty::Slice(element) => {
@@ -269,6 +278,8 @@ fn layout_of_uncached<'tcx>(
largest_niche: None,
align: element.align,
size: Size::ZERO,
+ max_repr_align: None,
+ unadjusted_abi_align: element.align.abi,
})
}
ty::Str => tcx.mk_layout(LayoutS {
@@ -278,6 +289,8 @@ fn layout_of_uncached<'tcx>(
largest_niche: None,
align: dl.i8_align,
size: Size::ZERO,
+ max_repr_align: None,
+ unadjusted_abi_align: dl.i8_align.abi,
}),
// Odd unit types.
@@ -299,12 +312,14 @@ fn layout_of_uncached<'tcx>(
tcx.mk_layout(unit)
}
- ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,
+ ty::Generator(def_id, args, _) => generator_layout(cx, ty, def_id, args)?,
- ty::Closure(_, ref substs) => {
- let tys = substs.as_closure().upvar_tys();
+ ty::Closure(_, ref args) => {
+ let tys = args.as_closure().upvar_tys();
univariant(
- &tys.map(|ty| Ok(cx.layout_of(ty)?.layout)).try_collect::<IndexVec<_, _>>()?,
+ &tys.iter()
+ .map(|ty| Ok(cx.layout_of(ty)?.layout))
+ .try_collect::<IndexVec<_, _>>()?,
&ReprOptions::default(),
StructKind::AlwaysSized,
)?
@@ -322,7 +337,7 @@ fn layout_of_uncached<'tcx>(
}
// SIMD vector types.
- ty::Adt(def, substs) if def.repr().simd() => {
+ ty::Adt(def, args) if def.repr().simd() => {
if !def.is_struct() {
// Should have yielded E0517 by now.
tcx.sess.delay_span_bug(
@@ -349,12 +364,12 @@ fn layout_of_uncached<'tcx>(
}
// Type of the first ADT field:
- let f0_ty = fields[FieldIdx::from_u32(0)].ty(tcx, substs);
+ let f0_ty = fields[FieldIdx::from_u32(0)].ty(tcx, args);
// Heterogeneous SIMD vectors are not supported:
// (should be caught by typeck)
for fi in fields {
- if fi.ty(tcx, substs) != f0_ty {
+ if fi.ty(tcx, args) != f0_ty {
tcx.sess.delay_span_bug(
DUMMY_SP,
"#[repr(simd)] was applied to an ADT with heterogeneous field type",
@@ -431,11 +446,13 @@ fn layout_of_uncached<'tcx>(
largest_niche: e_ly.largest_niche,
size,
align,
+ max_repr_align: None,
+ unadjusted_abi_align: align.abi,
})
}
// ADTs.
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
// Cache the field layouts.
let variants = def
.variants()
@@ -443,7 +460,7 @@ fn layout_of_uncached<'tcx>(
.map(|v| {
v.fields
.iter()
- .map(|field| Ok(cx.layout_of(field.ty(tcx, substs))?.layout))
+ .map(|field| Ok(cx.layout_of(field.ty(tcx, args))?.layout))
.try_collect::<IndexVec<_, _>>()
})
.try_collect::<IndexVec<VariantIdx, _>>()?;
@@ -482,7 +499,7 @@ fn layout_of_uncached<'tcx>(
let maybe_unsized = def.is_struct()
&& def.non_enum_variant().tail_opt().is_some_and(|last_field| {
let param_env = tcx.param_env(def.did());
- !tcx.type_of(last_field.did).subst_identity().is_sized(tcx, param_env)
+ !tcx.type_of(last_field.did).instantiate_identity().is_sized(tcx, param_env)
});
let Some(layout) = cx.layout_of_struct_or_enum(
@@ -502,7 +519,7 @@ fn layout_of_uncached<'tcx>(
// If the struct tail is sized and can be unsized, check that unsizing doesn't move the fields around.
if cfg!(debug_assertions)
&& maybe_unsized
- && def.non_enum_variant().tail().ty(tcx, substs).is_sized(tcx, cx.param_env)
+ && def.non_enum_variant().tail().ty(tcx, args).is_sized(tcx, cx.param_env)
{
let mut variants = variants;
let tail_replacement = cx.layout_of(Ty::new_slice(tcx, tcx.types.u8)).unwrap();
@@ -525,8 +542,13 @@ fn layout_of_uncached<'tcx>(
let FieldsShape::Arbitrary { offsets: sized_offsets, .. } = &layout.fields else {
bug!("unexpected FieldsShape for sized layout of {ty:?}: {:?}", layout.fields);
};
- let FieldsShape::Arbitrary { offsets: unsized_offsets, .. } = &unsized_layout.fields else {
- bug!("unexpected FieldsShape for unsized layout of {ty:?}: {:?}", unsized_layout.fields);
+ let FieldsShape::Arbitrary { offsets: unsized_offsets, .. } =
+ &unsized_layout.fields
+ else {
+ bug!(
+ "unexpected FieldsShape for unsized layout of {ty:?}: {:?}",
+ unsized_layout.fields
+ );
};
let (sized_tail, sized_fields) = sized_offsets.raw.split_last().unwrap();
@@ -551,11 +573,15 @@ fn layout_of_uncached<'tcx>(
return Err(error(cx, LayoutError::Unknown(ty)));
}
- ty::Bound(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) | ty::Infer(_) => {
+ ty::Bound(..)
+ | ty::GeneratorWitness(..)
+ | ty::GeneratorWitnessMIR(..)
+ | ty::Infer(_)
+ | ty::Error(_) => {
bug!("Layout::compute: unexpected type `{}`", ty)
}
- ty::Placeholder(..) | ty::Param(_) | ty::Error(_) => {
+ ty::Placeholder(..) | ty::Param(_) => {
return Err(error(cx, LayoutError::Unknown(ty)));
}
})
@@ -691,11 +717,11 @@ fn generator_layout<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
ty: Ty<'tcx>,
def_id: hir::def_id::DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
use SavedLocalEligibility::*;
let tcx = cx.tcx;
- let subst_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).subst(tcx, substs);
+ let subst_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).instantiate(tcx, args);
let Some(info) = tcx.generator_layout(def_id) else {
return Err(error(cx, LayoutError::Unknown(ty)));
@@ -705,7 +731,7 @@ fn generator_layout<'tcx>(
// Build a prefix layout, including "promoting" all ineligible
// locals as part of the prefix. We compute the layout of all of
// these fields at once to get optimal packing.
- let tag_index = substs.as_generator().prefix_tys().count();
+ let tag_index = args.as_generator().prefix_tys().len();
// `info.variant_fields` already accounts for the reserved variants, so no need to add them.
let max_discr = (info.variant_fields.len() - 1) as u128;
@@ -721,9 +747,10 @@ fn generator_layout<'tcx>(
.map(|local| subst_field(info.field_tys[local].ty))
.map(|ty| Ty::new_maybe_uninit(tcx, ty))
.map(|ty| Ok(cx.layout_of(ty)?.layout));
- let prefix_layouts = substs
+ let prefix_layouts = args
.as_generator()
.prefix_tys()
+ .iter()
.map(|ty| Ok(cx.layout_of(ty)?.layout))
.chain(iter::once(Ok(tag_layout)))
.chain(promoted_layouts)
@@ -879,6 +906,8 @@ fn generator_layout<'tcx>(
largest_niche: prefix.largest_niche,
size,
align,
+ max_repr_align: None,
+ unadjusted_abi_align: align.abi,
});
debug!("generator layout ({:?}): {:#?}", ty, layout);
Ok(layout)
@@ -929,11 +958,11 @@ fn record_layout_for_printing_outlined<'tcx>(
record(adt_kind.into(), adt_packed, opt_discr_size, variant_infos);
}
- ty::Generator(def_id, substs, _) => {
+ ty::Generator(def_id, args, _) => {
debug!("print-type-size t: `{:?}` record generator", layout.ty);
// Generators always have a begin/poisoned/end state with additional suspend points
let (variant_infos, opt_discr_size) =
- variant_info_for_generator(cx, layout, def_id, substs);
+ variant_info_for_generator(cx, layout, def_id, args);
record(DataTypeKind::Generator, false, opt_discr_size, variant_infos);
}
@@ -1023,7 +1052,7 @@ fn variant_info_for_generator<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
layout: TyAndLayout<'tcx>,
def_id: DefId,
- substs: ty::SubstsRef<'tcx>,
+ args: ty::GenericArgsRef<'tcx>,
) -> (Vec<VariantInfo>, Option<Size>) {
let Variants::Multiple { tag, ref tag_encoding, tag_field, .. } = layout.variants else {
return (vec![], None);
@@ -1033,9 +1062,10 @@ fn variant_info_for_generator<'tcx>(
let upvar_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
let mut upvars_size = Size::ZERO;
- let upvar_fields: Vec<_> = substs
+ let upvar_fields: Vec<_> = args
.as_generator()
.upvar_tys()
+ .iter()
.zip(upvar_names)
.enumerate()
.map(|(field_idx, (_, name))| {
@@ -1108,7 +1138,7 @@ fn variant_info_for_generator<'tcx>(
}
VariantInfo {
- name: Some(Symbol::intern(&ty::GeneratorSubsts::variant_name(variant_idx))),
+ name: Some(Symbol::intern(&ty::GeneratorArgs::variant_name(variant_idx))),
kind: SizeKind::Exact,
size: variant_size.bytes(),
align: variant_layout.align.abi.bytes(),
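Illustrative sketch (not part of the patch): a standalone look at the kind of facts `layout_of_uncached` computes. The new `max_repr_align`/`unadjusted_abi_align` fields added above track how a repr attribute can raise alignment beyond what the fields alone would require, as in this example.

    use std::mem::{align_of, size_of};

    #[allow(dead_code)]
    struct Plain {
        a: u32,
        b: u8,
    }

    #[allow(dead_code)]
    #[repr(align(16))]
    struct Aligned {
        a: u32,
        b: u8,
    }

    fn main() {
        // Without a repr attribute, alignment comes from the fields alone
        // (the "unadjusted" ABI alignment).
        println!("Plain:   size={} align={}", size_of::<Plain>(), align_of::<Plain>());

        // #[repr(align(16))] raises the alignment (and pads the size) beyond
        // what the fields require -- the situation the new layout fields record.
        assert_eq!(align_of::<Aligned>(), 16);
        assert_eq!(size_of::<Aligned>() % 16, 0);
        println!("Aligned: size={} align={}", size_of::<Aligned>(), align_of::<Aligned>());
    }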
diff --git a/compiler/rustc_ty_utils/src/layout_sanity_check.rs b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
index c4a4cda68..863333438 100644
--- a/compiler/rustc_ty_utils/src/layout_sanity_check.rs
+++ b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
@@ -52,7 +52,7 @@ pub(super) fn sanity_check_layout<'tcx>(
let mut fields = non_zst_fields(cx, layout);
let Some(first) = fields.next() else {
// No fields here, so this could be a primitive or enum -- either way it's not a newtype around a thing
- return *layout
+ return *layout;
};
if fields.next().is_none() {
let (offset, first) = first;
@@ -77,7 +77,7 @@ pub(super) fn sanity_check_layout<'tcx>(
Abi::Uninhabited | Abi::Aggregate { .. },
"ABI unexpectedly missing alignment and/or size in {layout:#?}"
);
- return
+ return;
};
assert_eq!(
layout.layout.align().abi,
diff --git a/compiler/rustc_ty_utils/src/lib.rs b/compiler/rustc_ty_utils/src/lib.rs
index 55b8857ed..147b600f7 100644
--- a/compiler/rustc_ty_utils/src/lib.rs
+++ b/compiler/rustc_ty_utils/src/lib.rs
@@ -8,6 +8,7 @@
#![feature(assert_matches)]
#![feature(iterator_try_collect)]
#![feature(let_chains)]
+#![feature(if_let_guard)]
#![feature(never_type)]
#![feature(box_patterns)]
#![recursion_limit = "256"]
diff --git a/compiler/rustc_ty_utils/src/needs_drop.rs b/compiler/rustc_ty_utils/src/needs_drop.rs
index 9d593dc5e..1fc5d9359 100644
--- a/compiler/rustc_ty_utils/src/needs_drop.rs
+++ b/compiler/rustc_ty_utils/src/needs_drop.rs
@@ -3,8 +3,8 @@
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::def_id::DefId;
use rustc_middle::query::Providers;
-use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::util::{needs_drop_components, AlwaysRequiresDrop};
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, EarlyBinder, Ty, TyCtxt};
use rustc_session::Limit;
use rustc_span::{sym, DUMMY_SP};
@@ -19,13 +19,32 @@ fn needs_drop_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>
// needs drop.
let adt_has_dtor =
|adt_def: ty::AdtDef<'tcx>| adt_def.destructor(tcx).map(|_| DtorType::Significant);
- let res =
- drop_tys_helper(tcx, query.value, query.param_env, adt_has_dtor, false).next().is_some();
+ let res = drop_tys_helper(tcx, query.value, query.param_env, adt_has_dtor, false)
+ .filter(filter_array_elements(tcx, query.param_env))
+ .next()
+ .is_some();
debug!("needs_drop_raw({:?}) = {:?}", query, res);
res
}
+/// HACK: in order to not mistakenly assume that `[PhantomData<T>; N]` requires drop glue
+/// we check the element type for drop glue. The correct fix would be to look at the
+/// entirety of the code around `needs_drop_components` and this file and come up with
+/// logic that is easier to follow while not repeating any checks that may thus diverge.
+fn filter_array_elements<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+) -> impl Fn(&Result<Ty<'tcx>, AlwaysRequiresDrop>) -> bool {
+ move |ty| match ty {
+ Ok(ty) => match *ty.kind() {
+ ty::Array(elem, _) => tcx.needs_drop_raw(param_env.and(elem)),
+ _ => true,
+ },
+ Err(AlwaysRequiresDrop) => true,
+ }
+}
+
fn has_significant_drop_raw<'tcx>(
tcx: TyCtxt<'tcx>,
query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
@@ -37,6 +56,7 @@ fn has_significant_drop_raw<'tcx>(
adt_consider_insignificant_dtor(tcx),
true,
)
+ .filter(filter_array_elements(tcx, query.param_env))
.next()
.is_some();
debug!("has_significant_drop_raw({:?}) = {:?}", query, res);
@@ -80,7 +100,7 @@ impl<'tcx, F> NeedsDropTypes<'tcx, F> {
impl<'tcx, F, I> Iterator for NeedsDropTypes<'tcx, F>
where
- F: Fn(ty::AdtDef<'tcx>, SubstsRef<'tcx>) -> NeedsDropResult<I>,
+ F: Fn(ty::AdtDef<'tcx>, GenericArgsRef<'tcx>) -> NeedsDropResult<I>,
I: Iterator<Item = Ty<'tcx>>,
{
type Item = NeedsDropResult<Ty<'tcx>>;
@@ -96,7 +116,7 @@ where
return Some(Err(AlwaysRequiresDrop));
}
- let components = match needs_drop_components(ty, &tcx.data_layout) {
+ let components = match needs_drop_components(tcx, ty) {
Err(e) => return Some(Err(e)),
Ok(components) => components,
};
@@ -119,21 +139,25 @@ where
_ if component.is_copy_modulo_regions(tcx, self.param_env) => (),
- ty::Closure(_, substs) => {
- queue_type(self, substs.as_closure().tupled_upvars_ty());
+ ty::Closure(_, args) => {
+ for upvar in args.as_closure().upvar_tys() {
+ queue_type(self, upvar);
+ }
}
- ty::Generator(def_id, substs, _) => {
- let substs = substs.as_generator();
- queue_type(self, substs.tupled_upvars_ty());
+ ty::Generator(def_id, args, _) => {
+ let args = args.as_generator();
+ for upvar in args.upvar_tys() {
+ queue_type(self, upvar);
+ }
- let witness = substs.witness();
+ let witness = args.witness();
let interior_tys = match witness.kind() {
&ty::GeneratorWitness(tys) => tcx.erase_late_bound_regions(tys),
_ => {
tcx.sess.delay_span_bug(
tcx.hir().span_if_local(def_id).unwrap_or(DUMMY_SP),
- format!("unexpected generator witness type {:?}", witness),
+ format!("unexpected generator witness type {witness:?}"),
);
return Some(Err(AlwaysRequiresDrop));
}
@@ -147,8 +171,8 @@ where
// Check for a `Drop` impl and whether this is a union or
// `ManuallyDrop`. If it's a struct or enum without a `Drop`
// impl then check whether the field types need `Drop`.
- ty::Adt(adt_def, substs) => {
- let tys = match (self.adt_components)(adt_def, substs) {
+ ty::Adt(adt_def, args) => {
+ let tys = match (self.adt_components)(adt_def, args) {
Err(e) => return Some(Err(e)),
Ok(tys) => tys,
};
@@ -160,7 +184,7 @@ where
queue_type(self, required);
}
}
- ty::Array(..) | ty::Alias(..) | ty::Param(_) => {
+ ty::Alias(..) | ty::Array(..) | ty::Placeholder(_) | ty::Param(_) => {
if ty == component {
// Return the type to the caller: they may be able
// to normalize further than we can.
@@ -172,7 +196,31 @@ where
queue_type(self, component);
}
}
- _ => return Some(Err(AlwaysRequiresDrop)),
+
+ ty::Foreign(_) | ty::Dynamic(..) => {
+ return Some(Err(AlwaysRequiresDrop));
+ }
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Slice(_)
+ | ty::Ref(..)
+ | ty::RawPtr(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(..)
+ | ty::Tuple(_)
+ | ty::Bound(..)
+ | ty::GeneratorWitness(..)
+ | ty::GeneratorWitnessMIR(..)
+ | ty::Never
+ | ty::Infer(_)
+ | ty::Error(_) => {
+ bug!("unexpected type returned by `needs_drop_components`: {component}")
+ }
}
}
}
@@ -210,7 +258,7 @@ fn drop_tys_helper<'tcx>(
match subty.kind() {
ty::Adt(adt_id, subst) => {
for subty in tcx.adt_drop_tys(adt_id.did())? {
- vec.push(EarlyBinder::bind(subty).subst(tcx, subst));
+ vec.push(EarlyBinder::bind(subty).instantiate(tcx, subst));
}
}
_ => vec.push(subty),
@@ -219,7 +267,7 @@ fn drop_tys_helper<'tcx>(
})
}
- let adt_components = move |adt_def: ty::AdtDef<'tcx>, substs: SubstsRef<'tcx>| {
+ let adt_components = move |adt_def: ty::AdtDef<'tcx>, args: GenericArgsRef<'tcx>| {
if adt_def.is_manually_drop() {
debug!("drop_tys_helper: `{:?}` is manually drop", adt_def);
Ok(Vec::new())
@@ -235,7 +283,7 @@ fn drop_tys_helper<'tcx>(
// Since the destructor is insignificant, we just want to make sure all of
// the passed in type parameters are also insignificant.
// Eg: Vec<T> dtor is insignificant when T=i32 but significant when T=Mutex.
- Ok(substs.types().collect())
+ Ok(args.types().collect())
}
}
} else if adt_def.is_union() {
@@ -243,8 +291,8 @@ fn drop_tys_helper<'tcx>(
Ok(Vec::new())
} else {
let field_tys = adt_def.all_fields().map(|field| {
- let r = tcx.type_of(field.did).subst(tcx, substs);
- debug!("drop_tys_helper: Subst into {:?} with {:?} getting {:?}", field, substs, r);
+ let r = tcx.type_of(field.did).instantiate(tcx, args);
+ debug!("drop_tys_helper: Subst into {:?} with {:?} getting {:?}", field, args, r);
r
});
if only_significant {
@@ -295,10 +343,10 @@ fn adt_drop_tys<'tcx>(
// significant.
let adt_has_dtor =
|adt_def: ty::AdtDef<'tcx>| adt_def.destructor(tcx).map(|_| DtorType::Significant);
- // `tcx.type_of(def_id)` identical to `tcx.make_adt(def, identity_substs)`
+// `tcx.type_of(def_id)` is identical to `tcx.make_adt(def, identity_args)`
drop_tys_helper(
tcx,
- tcx.type_of(def_id).subst_identity(),
+ tcx.type_of(def_id).instantiate_identity(),
tcx.param_env(def_id),
adt_has_dtor,
false,
@@ -307,7 +355,7 @@ fn adt_drop_tys<'tcx>(
.map(|components| tcx.mk_type_list(&components))
}
// If `def_id` refers to a generic ADT, the queries above and below act as if they had been handed
-// a `tcx.make_ty(def, identity_substs)` and as such it is legal to substitute the generic parameters
+// a `tcx.make_ty(def, identity_args)` and as such it is legal to substitute the generic parameters
// of the ADT into the outputted `ty`s.
fn adt_significant_drop_tys(
tcx: TyCtxt<'_>,
@@ -315,7 +363,7 @@ fn adt_significant_drop_tys(
) -> Result<&ty::List<Ty<'_>>, AlwaysRequiresDrop> {
drop_tys_helper(
tcx,
- tcx.type_of(def_id).subst_identity(), // identical to `tcx.make_adt(def, identity_substs)`
+ tcx.type_of(def_id).instantiate_identity(), // identical to `tcx.make_adt(def, identity_args)`
tcx.param_env(def_id),
adt_consider_insignificant_dtor(tcx),
true,
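
The hunks above switch every `EarlyBinder::subst`/`subst_identity` call to `instantiate`/`instantiate_identity`. As a rough mental model (a toy sketch with invented types, not the real `EarlyBinder` from `rustc_middle::ty`), an early binder wraps a value containing numbered generic parameters and only yields a usable value once those parameters are replaced:

    // Toy model of the early-binder pattern: `Binder` wraps a type expression whose
    // `Param(i)` placeholders must be replaced before the value can be used.
    #[derive(Clone, Debug, PartialEq)]
    enum TyExpr {
        Param(usize),        // the i-th generic parameter of the item
        Ref(Box<TyExpr>),    // a reference type `&T`
        Named(&'static str), // a concrete type such as `i32`
    }

    struct Binder(TyExpr);

    impl Binder {
        // Replace every `Param(i)` with `args[i]` (cf. `instantiate`).
        fn instantiate(&self, args: &[TyExpr]) -> TyExpr {
            fn go(t: &TyExpr, args: &[TyExpr]) -> TyExpr {
                match t {
                    TyExpr::Param(i) => args[*i].clone(),
                    TyExpr::Ref(inner) => TyExpr::Ref(Box::new(go(inner, args))),
                    TyExpr::Named(n) => TyExpr::Named(*n),
                }
            }
            go(&self.0, args)
        }

        // Keep each parameter as itself (cf. `instantiate_identity`).
        fn instantiate_identity(&self) -> TyExpr {
            self.0.clone()
        }
    }

    fn main() {
        // A field of type `&T`, instantiated with `T = i32`, becomes `&i32`.
        let field_ty = Binder(TyExpr::Ref(Box::new(TyExpr::Param(0))));
        assert_eq!(
            field_ty.instantiate(&[TyExpr::Named("i32")]),
            TyExpr::Ref(Box::new(TyExpr::Named("i32")))
        );
        assert_eq!(
            field_ty.instantiate_identity(),
            TyExpr::Ref(Box::new(TyExpr::Param(0)))
        );
    }

`instantiate_identity` is the degenerate case in which each parameter stands for itself, which is why the queries above can call it on `tcx.type_of(def_id)` to obtain the ADT's own generic form.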
diff --git a/compiler/rustc_ty_utils/src/opaque_types.rs b/compiler/rustc_ty_utils/src/opaque_types.rs
index 570c3b245..38768f0a0 100644
--- a/compiler/rustc_ty_utils/src/opaque_types.rs
+++ b/compiler/rustc_ty_utils/src/opaque_types.rs
@@ -7,7 +7,7 @@ use rustc_middle::ty::util::{CheckRegions, NotUniqueParam};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
use rustc_span::Span;
-use rustc_trait_selection::traits::check_substs_compatible;
+use rustc_trait_selection::traits::check_args_compatible;
use std::ops::ControlFlow;
use crate::errors::{DuplicateArg, NotParam};
@@ -45,7 +45,7 @@ impl<'tcx> OpaqueTypeCollector<'tcx> {
fn parent_trait_ref(&self) -> Option<ty::TraitRef<'tcx>> {
let parent = self.parent()?;
if matches!(self.tcx.def_kind(parent), DefKind::Impl { .. }) {
- Some(self.tcx.impl_trait_ref(parent)?.subst_identity())
+ Some(self.tcx.impl_trait_ref(parent)?.instantiate_identity())
} else {
None
}
@@ -53,7 +53,9 @@ impl<'tcx> OpaqueTypeCollector<'tcx> {
fn parent(&self) -> Option<LocalDefId> {
match self.tcx.def_kind(self.item) {
- DefKind::AnonConst | DefKind::InlineConst | DefKind::Fn | DefKind::TyAlias => None,
+ DefKind::AnonConst | DefKind::InlineConst | DefKind::Fn | DefKind::TyAlias { .. } => {
+ None
+ }
DefKind::AssocFn | DefKind::AssocTy | DefKind::AssocConst => {
Some(self.tcx.local_parent(self.item))
}
@@ -116,7 +118,7 @@ impl<'tcx> OpaqueTypeCollector<'tcx> {
#[instrument(level = "trace", skip(self))]
fn visit_nested_item(&mut self, id: rustc_hir::ItemId) {
let id = id.owner_id.def_id;
- if let DefKind::TyAlias = self.collector.tcx.def_kind(id) {
+ if let DefKind::TyAlias { .. } = self.collector.tcx.def_kind(id) {
let items = self.collector.tcx.opaque_types_defined_by(id);
self.collector.opaques.extend(items);
}
@@ -159,19 +161,19 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
self.opaques.push(alias_ty.def_id.expect_local());
- match self.tcx.uses_unique_generic_params(alias_ty.substs, CheckRegions::Bound) {
+ match self.tcx.uses_unique_generic_params(alias_ty.args, CheckRegions::Bound) {
Ok(()) => {
// FIXME: implement higher kinded lifetime bounds on nested opaque types. They are not
// supported at all, so this is sound to do, but once we want to support them, you'll
// start seeing the error below.
// Collect opaque types nested within the associated type bounds of this opaque type.
- // We use identity substs here, because we already know that the opaque type uses
+ // We use identity args here, because we already know that the opaque type uses
// only generic parameters, and thus substituting would not give us more information.
for (pred, span) in self
.tcx
.explicit_item_bounds(alias_ty.def_id)
- .subst_identity_iter_copied()
+ .instantiate_identity_iter_copied()
{
trace!(?pred);
self.visit_spanned(span, pred);
@@ -196,7 +198,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
ty::Alias(ty::Weak, alias_ty) if alias_ty.def_id.is_local() => {
self.tcx
.type_of(alias_ty.def_id)
- .subst(self.tcx, alias_ty.substs)
+ .instantiate(self.tcx, alias_ty.args)
.visit_with(self)?;
}
ty::Alias(ty::Projection, alias_ty) => {
@@ -222,22 +224,22 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
continue;
}
- let impl_substs = alias_ty.substs.rebase_onto(
+ let impl_args = alias_ty.args.rebase_onto(
self.tcx,
parent_trait_ref.def_id,
- ty::InternalSubsts::identity_for_item(self.tcx, parent),
+ ty::GenericArgs::identity_for_item(self.tcx, parent),
);
- if check_substs_compatible(self.tcx, assoc, impl_substs) {
+ if check_args_compatible(self.tcx, assoc, impl_args) {
return self
.tcx
.type_of(assoc.def_id)
- .subst(self.tcx, impl_substs)
+ .instantiate(self.tcx, impl_args)
.visit_with(self);
} else {
self.tcx.sess.delay_span_bug(
self.tcx.def_span(assoc.def_id),
- "item had incorrect substs",
+ "item had incorrect args",
);
}
}
@@ -250,15 +252,15 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
}
for variant in def.variants().iter() {
for field in variant.fields.iter() {
- // Don't use the `ty::Adt` substs, we either
- // * found the opaque in the substs
+ // Don't use the `ty::Adt` args, we either
+ // * found the opaque in the args
// * will find the opaque in the unsubstituted fields
// The only other situation that can occur is that after substituting,
// some projection resolves to an opaque that we would have otherwise
// not found. While we could substitute and walk those, that would mean we
// would have to walk all substitutions of an Adt, which can quickly
// degenerate into looking at an exponential number of types.
- let ty = self.tcx.type_of(field.did).subst_identity();
+ let ty = self.tcx.type_of(field.did).instantiate_identity();
self.visit_spanned(self.tcx.def_span(field.did), ty);
}
}
@@ -276,7 +278,7 @@ fn opaque_types_defined_by<'tcx>(tcx: TyCtxt<'tcx>, item: LocalDefId) -> &'tcx [
match kind {
// Walk over the signature of the function-like to find the opaques.
DefKind::AssocFn | DefKind::Fn => {
- let ty_sig = tcx.fn_sig(item).subst_identity();
+ let ty_sig = tcx.fn_sig(item).instantiate_identity();
let hir_sig = tcx.hir().get_by_def_id(item).fn_sig().unwrap();
// Walk over the inputs and outputs manually in order to get good spans for them.
collector.visit_spanned(hir_sig.decl.output.span(), ty_sig.output());
@@ -291,15 +293,15 @@ fn opaque_types_defined_by<'tcx>(tcx: TyCtxt<'tcx>, item: LocalDefId) -> &'tcx [
Some(ty) => ty.span,
_ => tcx.def_span(item),
};
- collector.visit_spanned(span, tcx.type_of(item).subst_identity());
+ collector.visit_spanned(span, tcx.type_of(item).instantiate_identity());
collector.collect_body_and_predicate_taits();
}
// We're also doing this for `AssocTy` for the wf checks in `check_opaque_meets_bounds`
- DefKind::TyAlias | DefKind::AssocTy => {
- tcx.type_of(item).subst_identity().visit_with(&mut collector);
+ DefKind::TyAlias { .. } | DefKind::AssocTy => {
+ tcx.type_of(item).instantiate_identity().visit_with(&mut collector);
}
DefKind::OpaqueTy => {
- for (pred, span) in tcx.explicit_item_bounds(item).subst_identity_iter_copied() {
+ for (pred, span) in tcx.explicit_item_bounds(item).instantiate_identity_iter_copied() {
collector.visit_spanned(span, pred);
}
}
@@ -318,7 +320,6 @@ fn opaque_types_defined_by<'tcx>(tcx: TyCtxt<'tcx>, item: LocalDefId) -> &'tcx [
| DefKind::ExternCrate
| DefKind::Use
| DefKind::ForeignMod
- | DefKind::ImplTraitPlaceholder
| DefKind::Field
| DefKind::LifetimeParam
| DefKind::GlobalAsm
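
The collector changed above implements `TypeVisitor` and threads `std::ops::ControlFlow` through each `visit_*` call, so a traversal can stop as soon as something interesting (or an error) is found. A minimal standalone sketch of that visit-and-short-circuit pattern, using invented toy types rather than the compiler's:

    use std::ops::ControlFlow;

    // Toy type tree standing in for `Ty<'tcx>`; names are invented.
    enum Ty {
        Opaque(u32), // stand-in for an opaque type with some id
        Ref(Box<Ty>),
        Tuple(Vec<Ty>),
        Unit,
    }

    // Walk the tree collecting opaque ids, and break out early on a duplicate,
    // loosely mirroring how the real collector reports `DuplicateArg` and stops.
    fn collect(ty: &Ty, seen: &mut Vec<u32>) -> ControlFlow<u32> {
        match ty {
            Ty::Opaque(id) => {
                if seen.contains(id) {
                    return ControlFlow::Break(*id); // duplicate found
                }
                seen.push(*id);
                ControlFlow::Continue(())
            }
            Ty::Ref(inner) => collect(inner, seen),
            Ty::Tuple(parts) => {
                for part in parts {
                    if let ControlFlow::Break(id) = collect(part, seen) {
                        return ControlFlow::Break(id);
                    }
                }
                ControlFlow::Continue(())
            }
            Ty::Unit => ControlFlow::Continue(()),
        }
    }

    fn main() {
        let ty = Ty::Tuple(vec![Ty::Opaque(1), Ty::Ref(Box::new(Ty::Opaque(1))), Ty::Unit]);
        let mut seen = Vec::new();
        assert_eq!(collect(&ty, &mut seen), ControlFlow::Break(1));
    }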
diff --git a/compiler/rustc_ty_utils/src/representability.rs b/compiler/rustc_ty_utils/src/representability.rs
index 0b5e27c2c..f34e0df2c 100644
--- a/compiler/rustc_ty_utils/src/representability.rs
+++ b/compiler/rustc_ty_utils/src/representability.rs
@@ -31,7 +31,7 @@ fn representability(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Representability {
}
Representability::Representable
}
- DefKind::Field => representability_ty(tcx, tcx.type_of(def_id).subst_identity()),
+ DefKind::Field => representability_ty(tcx, tcx.type_of(def_id).instantiate_identity()),
def_kind => bug!("unexpected {def_kind:?}"),
}
}
@@ -68,14 +68,14 @@ representability_adt_ty(Bar<..>) is in the cycle and representability(Bar) is
*not* in the cycle.
*/
fn representability_adt_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Representability {
- let ty::Adt(adt, substs) = ty.kind() else { bug!("expected adt") };
+ let ty::Adt(adt, args) = ty.kind() else { bug!("expected adt") };
if let Some(def_id) = adt.did().as_local() {
rtry!(tcx.representability(def_id));
}
// At this point, we know that the item of the ADT type is representable;
// but the type parameters may cause a cycle with an upstream type
let params_in_repr = tcx.params_in_repr(adt.did());
- for (i, subst) in substs.iter().enumerate() {
+ for (i, subst) in args.iter().enumerate() {
if let ty::GenericArgKind::Type(ty) = subst.unpack() {
if params_in_repr.contains(i as u32) {
rtry!(representability_ty(tcx, ty));
@@ -91,7 +91,11 @@ fn params_in_repr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> BitSet<u32> {
let mut params_in_repr = BitSet::new_empty(generics.params.len());
for variant in adt_def.variants() {
for field in variant.fields.iter() {
- params_in_repr_ty(tcx, tcx.type_of(field.did).subst_identity(), &mut params_in_repr);
+ params_in_repr_ty(
+ tcx,
+ tcx.type_of(field.did).instantiate_identity(),
+ &mut params_in_repr,
+ );
}
}
params_in_repr
@@ -99,9 +103,9 @@ fn params_in_repr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> BitSet<u32> {
fn params_in_repr_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, params_in_repr: &mut BitSet<u32>) {
match *ty.kind() {
- ty::Adt(adt, substs) => {
+ ty::Adt(adt, args) => {
let inner_params_in_repr = tcx.params_in_repr(adt.did());
- for (i, subst) in substs.iter().enumerate() {
+ for (i, subst) in args.iter().enumerate() {
if let ty::GenericArgKind::Type(ty) = subst.unpack() {
if inner_params_in_repr.contains(i as u32) {
params_in_repr_ty(tcx, ty, params_in_repr);
diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs
index 6e5c50492..ba0258b63 100644
--- a/compiler/rustc_ty_utils/src/ty.rs
+++ b/compiler/rustc_ty_utils/src/ty.rs
@@ -37,12 +37,12 @@ fn sized_constraint_for_ty<'tcx>(
Some(&ty) => sized_constraint_for_ty(tcx, adtdef, ty),
},
- Adt(adt, substs) => {
+ Adt(adt, args) => {
// recursive case
let adt_tys = adt.sized_constraint(tcx);
debug!("sized_constraint_for_ty({:?}) intermediate = {:?}", ty, adt_tys);
adt_tys
- .subst_iter_copied(tcx, substs)
+ .iter_instantiated(tcx, args)
.flat_map(|ty| sized_constraint_for_ty(tcx, adtdef, ty))
.collect()
}
@@ -58,11 +58,18 @@ fn sized_constraint_for_ty<'tcx>(
// we know that `T` is Sized and do not need to check
// it on the impl.
- let Some(sized_trait) = tcx.lang_items().sized_trait() else { return vec![ty] };
- let sized_predicate =
- ty::TraitRef::new(tcx, sized_trait, [ty]).without_const().to_predicate(tcx);
+ let Some(sized_trait_def_id) = tcx.lang_items().sized_trait() else { return vec![ty] };
let predicates = tcx.predicates_of(adtdef.did()).predicates;
- if predicates.iter().any(|(p, _)| *p == sized_predicate) { vec![] } else { vec![ty] }
+ if predicates.iter().any(|(p, _)| {
+ p.as_trait_clause().is_some_and(|trait_pred| {
+ trait_pred.def_id() == sized_trait_def_id
+ && trait_pred.self_ty().skip_binder() == ty
+ })
+ }) {
+ vec![]
+ } else {
+ vec![ty]
+ }
}
Placeholder(..) | Bound(..) | Infer(..) => {
@@ -92,24 +99,25 @@ fn defaultness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Defaultness {
/// - a tuple of type parameters or projections, if there are multiple
/// such.
/// - an Error, if a type is infinitely sized
-fn adt_sized_constraint(tcx: TyCtxt<'_>, def_id: DefId) -> &[Ty<'_>] {
+fn adt_sized_constraint<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> ty::EarlyBinder<&'tcx ty::List<Ty<'tcx>>> {
if let Some(def_id) = def_id.as_local() {
if matches!(tcx.representability(def_id), ty::Representability::Infinite) {
- return tcx.mk_type_list(&[Ty::new_misc_error(tcx)]);
+ return ty::EarlyBinder::bind(tcx.mk_type_list(&[Ty::new_misc_error(tcx)]));
}
}
let def = tcx.adt_def(def_id);
- let result = tcx.mk_type_list_from_iter(
- def.variants()
- .iter()
- .filter_map(|v| v.tail_opt())
- .flat_map(|f| sized_constraint_for_ty(tcx, def, tcx.type_of(f.did).subst_identity())),
- );
+ let result =
+ tcx.mk_type_list_from_iter(def.variants().iter().filter_map(|v| v.tail_opt()).flat_map(
+ |f| sized_constraint_for_ty(tcx, def, tcx.type_of(f.did).instantiate_identity()),
+ ));
debug!("adt_sized_constraint: {:?} => {:?}", def, result);
- result
+ ty::EarlyBinder::bind(result)
}
/// See `ParamEnv` struct definition for details.
@@ -131,9 +139,11 @@ fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
// sure that this will succeed without errors anyway.
if tcx.def_kind(def_id) == DefKind::AssocFn
- && tcx.associated_item(def_id).container == ty::AssocItemContainer::TraitContainer
+ && let assoc_item = tcx.associated_item(def_id)
+ && assoc_item.container == ty::AssocItemContainer::TraitContainer
+ && assoc_item.defaultness(tcx).has_value()
{
- let sig = tcx.fn_sig(def_id).subst_identity();
+ let sig = tcx.fn_sig(def_id).instantiate_identity();
// We accounted for the binder of the fn sig, so skip the binder.
sig.skip_binder().visit_with(&mut ImplTraitInTraitFinder {
tcx,
@@ -146,85 +156,9 @@ fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
}
let local_did = def_id.as_local();
- // FIXME(-Zlower-impl-trait-in-trait-to-assoc-ty): This isn't correct for
- // RPITITs in const trait fn.
- let hir_id = local_did.and_then(|def_id| tcx.opt_local_def_id_to_hir_id(def_id));
-
- // FIXME(consts): This is not exactly in line with the constness query.
- let constness = match hir_id {
- Some(hir_id) => match tcx.hir().get(hir_id) {
- hir::Node::TraitItem(hir::TraitItem { kind: hir::TraitItemKind::Fn(..), .. })
- if tcx.is_const_default_method(def_id) =>
- {
- hir::Constness::Const
- }
-
- hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(..), .. })
- | hir::Node::Item(hir::Item { kind: hir::ItemKind::Static(..), .. })
- | hir::Node::TraitItem(hir::TraitItem {
- kind: hir::TraitItemKind::Const(..), ..
- })
- | hir::Node::AnonConst(_)
- | hir::Node::ConstBlock(_)
- | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. })
- | hir::Node::ImplItem(hir::ImplItem {
- kind:
- hir::ImplItemKind::Fn(
- hir::FnSig {
- header: hir::FnHeader { constness: hir::Constness::Const, .. },
- ..
- },
- ..,
- ),
- ..
- }) => hir::Constness::Const,
-
- hir::Node::ImplItem(hir::ImplItem {
- kind: hir::ImplItemKind::Type(..) | hir::ImplItemKind::Fn(..),
- ..
- }) => {
- let parent_hir_id = tcx.hir().parent_id(hir_id);
- match tcx.hir().get(parent_hir_id) {
- hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Impl(hir::Impl { constness, .. }),
- ..
- }) => *constness,
- _ => span_bug!(
- tcx.def_span(parent_hir_id.owner),
- "impl item's parent node is not an impl",
- ),
- }
- }
-
- hir::Node::Item(hir::Item {
- kind:
- hir::ItemKind::Fn(hir::FnSig { header: hir::FnHeader { constness, .. }, .. }, ..),
- ..
- })
- | hir::Node::TraitItem(hir::TraitItem {
- kind:
- hir::TraitItemKind::Fn(
- hir::FnSig { header: hir::FnHeader { constness, .. }, .. },
- ..,
- ),
- ..
- })
- | hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Impl(hir::Impl { constness, .. }),
- ..
- }) => *constness,
-
- _ => hir::Constness::NotConst,
- },
- // FIXME(consts): It's suspicious that a param-env for a foreign item
- // will always have NotConst param-env, though we don't typically use
- // that param-env for anything meaningful right now, so it's likely
- // not an issue.
- None => hir::Constness::NotConst,
- };
let unnormalized_env =
- ty::ParamEnv::new(tcx.mk_clauses(&predicates), traits::Reveal::UserFacing, constness);
+ ty::ParamEnv::new(tcx.mk_clauses(&predicates), traits::Reveal::UserFacing);
let body_id = local_did.unwrap_or(CRATE_DEF_ID);
let cause = traits::ObligationCause::misc(tcx.def_span(def_id), body_id);
@@ -257,8 +191,10 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ImplTraitInTraitFinder<'_, 'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> std::ops::ControlFlow<Self::BreakTy> {
if let ty::Alias(ty::Projection, unshifted_alias_ty) = *ty.kind()
- && self.tcx.is_impl_trait_in_trait(unshifted_alias_ty.def_id)
- && self.tcx.impl_trait_in_trait_parent_fn(unshifted_alias_ty.def_id) == self.fn_def_id
+ && let Some(ty::ImplTraitInTraitData::Trait { fn_def_id, .. }
+ | ty::ImplTraitInTraitData::Impl { fn_def_id, .. })
+ = self.tcx.opt_rpitit_info(unshifted_alias_ty.def_id)
+ && fn_def_id == self.fn_def_id
&& self.seen.insert(unshifted_alias_ty.def_id)
{
// We have entered some binders as we've walked into the
@@ -282,11 +218,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ImplTraitInTraitFinder<'_, 'tcx> {
// If we're lowering to associated item, install the opaque type which is just
// the `type_of` of the trait's associated item. If we're using the old lowering
// strategy, then just reinterpret the associated type like an opaque :^)
- let default_ty = if self.tcx.lower_impl_trait_in_trait_to_assoc_ty() {
- self.tcx.type_of(shifted_alias_ty.def_id).subst(self.tcx, shifted_alias_ty.substs)
- } else {
- Ty::new_alias(self.tcx,ty::Opaque, shifted_alias_ty)
- };
+ let default_ty = self.tcx.type_of(shifted_alias_ty.def_id).instantiate(self.tcx, shifted_alias_ty.args);
self.predicates.push(
ty::Binder::bind_with_vars(
@@ -303,7 +235,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ImplTraitInTraitFinder<'_, 'tcx> {
for bound in self
.tcx
.item_bounds(unshifted_alias_ty.def_id)
- .subst_iter(self.tcx, unshifted_alias_ty.substs)
+ .iter_instantiated(self.tcx, unshifted_alias_ty.args)
{
bound.visit_with(self);
}
@@ -317,22 +249,6 @@ fn param_env_reveal_all_normalized(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamE
tcx.param_env(def_id).with_reveal_all_normalized(tcx)
}
-fn instance_def_size_estimate<'tcx>(
- tcx: TyCtxt<'tcx>,
- instance_def: ty::InstanceDef<'tcx>,
-) -> usize {
- use ty::InstanceDef;
-
- match instance_def {
- InstanceDef::Item(..) | InstanceDef::DropGlue(..) => {
- let mir = tcx.instance_mir(instance_def);
- mir.basic_blocks.iter().map(|bb| bb.statements.len() + 1).sum()
- }
- // Estimate the size of other compiler-generated shims to be 1.
- _ => 1,
- }
-}
-
/// If `def_id` is an issue 33140 hack impl, returns its self type; otherwise, returns `None`.
///
/// See [`ty::ImplOverlapKind::Issue33140`] for more details.
@@ -356,8 +272,8 @@ fn issue33140_self_ty(tcx: TyCtxt<'_>, def_id: DefId) -> Option<EarlyBinder<Ty<'
}
// impl must be `impl Trait for dyn Marker1 + Marker2 + ...`
- if trait_ref.substs.len() != 1 {
- debug!("issue33140_self_ty - impl has substs!");
+ if trait_ref.args.len() != 1 {
+ debug!("issue33140_self_ty - impl has args!");
return None;
}
@@ -408,14 +324,12 @@ fn unsizing_params_for_adt<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> BitSet<u32
};
// The last field of the structure has to exist and contain type/const parameters.
- let Some((tail_field, prefix_fields)) =
- def.non_enum_variant().fields.raw.split_last() else
- {
+ let Some((tail_field, prefix_fields)) = def.non_enum_variant().fields.raw.split_last() else {
return BitSet::new_empty(num_params);
};
let mut unsizing_params = BitSet::new_empty(num_params);
- for arg in tcx.type_of(tail_field.did).subst_identity().walk() {
+ for arg in tcx.type_of(tail_field.did).instantiate_identity().walk() {
if let Some(i) = maybe_unsizing_param_idx(arg) {
unsizing_params.insert(i);
}
@@ -424,7 +338,7 @@ fn unsizing_params_for_adt<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> BitSet<u32
// Ensure none of the other fields mention the parameters used
// in unsizing.
for field in prefix_fields {
- for arg in tcx.type_of(field.did).subst_identity().walk() {
+ for arg in tcx.type_of(field.did).instantiate_identity().walk() {
if let Some(i) = maybe_unsizing_param_idx(arg) {
unsizing_params.remove(i);
}
@@ -440,7 +354,6 @@ pub fn provide(providers: &mut Providers) {
adt_sized_constraint,
param_env,
param_env_reveal_all_normalized,
- instance_def_size_estimate,
issue33140_self_ty,
defaultness,
unsizing_params_for_adt,
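
The `sized_constraint_for_ty` hunk above stops comparing against a freshly built `Sized` predicate and instead scans the ADT's predicates for any trait clause whose trait is `Sized` and whose self type is the parameter at hand. A standalone sketch of that shape of check, with all names (`DefId`, `Ty`, `TraitClause`, `Predicate`, `has_explicit_sized_bound`) invented for illustration:

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct DefId(u32);

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Ty(&'static str);

    struct TraitClause {
        def_id: DefId, // the trait being required
        self_ty: Ty,   // the type it is required of
    }

    enum Predicate {
        Trait(TraitClause),
        Other,
    }

    impl Predicate {
        fn as_trait_clause(&self) -> Option<&TraitClause> {
            match self {
                Predicate::Trait(clause) => Some(clause),
                Predicate::Other => None,
            }
        }
    }

    // Mirrors the shape of the new check: any predicate that is a trait clause
    // for `Sized` with exactly this self type counts.
    fn has_explicit_sized_bound(predicates: &[Predicate], sized_trait: DefId, ty: Ty) -> bool {
        predicates.iter().any(|p| {
            p.as_trait_clause()
                .is_some_and(|clause| clause.def_id == sized_trait && clause.self_ty == ty)
        })
    }

    fn main() {
        let sized = DefId(0);
        let preds = [
            Predicate::Trait(TraitClause { def_id: sized, self_ty: Ty("T") }),
            Predicate::Other,
        ];
        assert!(has_explicit_sized_bound(&preds, sized, Ty("T")));
        assert!(!has_explicit_sized_bound(&preds, sized, Ty("U")));
    }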
diff --git a/compiler/rustc_type_ir/src/lib.rs b/compiler/rustc_type_ir/src/lib.rs
index 878a6b784..b0f8ea7a0 100644
--- a/compiler/rustc_type_ir/src/lib.rs
+++ b/compiler/rustc_type_ir/src/lib.rs
@@ -6,6 +6,7 @@
#![feature(unwrap_infallible)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[macro_use]
extern crate bitflags;
@@ -31,6 +32,7 @@ mod macros;
mod structural_impls;
pub use codec::*;
+pub use structural_impls::{DebugWithInfcx, InferCtxtLike, OptWithInfcx};
pub use sty::*;
pub use ty_info::*;
@@ -39,41 +41,41 @@ pub trait HashStableContext {}
pub trait Interner: Sized {
type AdtDef: Clone + Debug + Hash + Ord;
- type SubstsRef: Clone + Debug + Hash + Ord;
+ type GenericArgsRef: Clone + DebugWithInfcx<Self> + Hash + Ord;
type DefId: Clone + Debug + Hash + Ord;
type Binder<T>;
- type Ty: Clone + Debug + Hash + Ord;
- type Const: Clone + Debug + Hash + Ord;
- type Region: Clone + Debug + Hash + Ord;
+ type Ty: Clone + DebugWithInfcx<Self> + Hash + Ord;
+ type Const: Clone + DebugWithInfcx<Self> + Hash + Ord;
+ type Region: Clone + DebugWithInfcx<Self> + Hash + Ord;
type Predicate;
type TypeAndMut: Clone + Debug + Hash + Ord;
type Mutability: Clone + Debug + Hash + Ord;
type Movability: Clone + Debug + Hash + Ord;
- type PolyFnSig: Clone + Debug + Hash + Ord;
- type ListBinderExistentialPredicate: Clone + Debug + Hash + Ord;
- type BinderListTy: Clone + Debug + Hash + Ord;
+ type PolyFnSig: Clone + DebugWithInfcx<Self> + Hash + Ord;
+ type ListBinderExistentialPredicate: Clone + DebugWithInfcx<Self> + Hash + Ord;
+ type BinderListTy: Clone + DebugWithInfcx<Self> + Hash + Ord;
type ListTy: Clone + Debug + Hash + Ord + IntoIterator<Item = Self::Ty>;
- type AliasTy: Clone + Debug + Hash + Ord;
+ type AliasTy: Clone + DebugWithInfcx<Self> + Hash + Ord;
type ParamTy: Clone + Debug + Hash + Ord;
type BoundTy: Clone + Debug + Hash + Ord;
type PlaceholderType: Clone + Debug + Hash + Ord;
+ type InferTy: Clone + DebugWithInfcx<Self> + Hash + Ord;
type ErrorGuaranteed: Clone + Debug + Hash + Ord;
type PredicateKind: Clone + Debug + Hash + PartialEq + Eq;
type AllocId: Clone + Debug + Hash + Ord;
- type InferConst: Clone + Debug + Hash + Ord;
- type AliasConst: Clone + Debug + Hash + Ord;
+ type InferConst: Clone + DebugWithInfcx<Self> + Hash + Ord;
+ type AliasConst: Clone + DebugWithInfcx<Self> + Hash + Ord;
type PlaceholderConst: Clone + Debug + Hash + Ord;
type ParamConst: Clone + Debug + Hash + Ord;
type BoundConst: Clone + Debug + Hash + Ord;
- type InferTy: Clone + Debug + Hash + Ord;
type ValueConst: Clone + Debug + Hash + Ord;
- type ExprConst: Clone + Debug + Hash + Ord;
+ type ExprConst: Clone + DebugWithInfcx<Self> + Hash + Ord;
type EarlyBoundRegion: Clone + Debug + Hash + Ord;
type BoundRegion: Clone + Debug + Hash + Ord;
type FreeRegion: Clone + Debug + Hash + Ord;
- type RegionVid: Clone + Debug + Hash + Ord;
+ type RegionVid: Clone + DebugWithInfcx<Self> + Hash + Ord;
type PlaceholderRegion: Clone + Debug + Hash + Ord;
fn ty_and_mut_to_parts(ty_and_mut: Self::TypeAndMut) -> (Self::Ty, Self::Mutability);
@@ -214,6 +216,11 @@ bitflags! {
/// Does this have `ConstKind::Placeholder`?
const HAS_CT_PLACEHOLDER = 1 << 8;
+ /// Does this have placeholders?
+ const HAS_PLACEHOLDER = TypeFlags::HAS_TY_PLACEHOLDER.bits
+ | TypeFlags::HAS_RE_PLACEHOLDER.bits
+ | TypeFlags::HAS_CT_PLACEHOLDER.bits;
+
/// `true` if there are "names" of regions and so forth
/// that are local to a particular fn/inferctxt
const HAS_FREE_LOCAL_REGIONS = 1 << 9;
@@ -407,7 +414,7 @@ pub fn debug_bound_var<T: std::fmt::Write>(
var: impl std::fmt::Debug,
) -> Result<(), std::fmt::Error> {
if debruijn == INNERMOST {
- write!(fmt, "^{:?}", var)
+ write!(fmt, "^{var:?}")
} else {
write!(fmt, "^{}_{:?}", debruijn.index(), var)
}
@@ -775,20 +782,6 @@ impl fmt::Debug for FloatVid {
}
}
-impl fmt::Debug for InferTy {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- use InferTy::*;
- match *self {
- TyVar(ref v) => v.fmt(f),
- IntVar(ref v) => v.fmt(f),
- FloatVar(ref v) => v.fmt(f),
- FreshTy(v) => write!(f, "FreshTy({v:?})"),
- FreshIntTy(v) => write!(f, "FreshIntTy({v:?})"),
- FreshFloatTy(v) => write!(f, "FreshFloatTy({v:?})"),
- }
- }
-}
-
impl fmt::Debug for Variance {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match *self {
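
Among the `rustc_type_ir` changes above is a new composite `HAS_PLACEHOLDER` flag, defined as the union of the three per-kind placeholder bits. A tiny plain-const sketch of why a composite mask is convenient (the constants and values below are illustrative only, not the real `TypeFlags`):

    // Illustrative constants; the real flags live in `TypeFlags`.
    const HAS_TY_PLACEHOLDER: u32 = 1 << 6;
    const HAS_RE_PLACEHOLDER: u32 = 1 << 7;
    const HAS_CT_PLACEHOLDER: u32 = 1 << 8;
    // The composite added in the hunk above: "any placeholder at all?".
    const HAS_PLACEHOLDER: u32 =
        HAS_TY_PLACEHOLDER | HAS_RE_PLACEHOLDER | HAS_CT_PLACEHOLDER;

    fn main() {
        let flags = HAS_RE_PLACEHOLDER;
        // One intersection test instead of three separate checks.
        assert!(flags & HAS_PLACEHOLDER != 0);
    }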
diff --git a/compiler/rustc_type_ir/src/structural_impls.rs b/compiler/rustc_type_ir/src/structural_impls.rs
index 1e42175f6..f36f4ec86 100644
--- a/compiler/rustc_type_ir/src/structural_impls.rs
+++ b/compiler/rustc_type_ir/src/structural_impls.rs
@@ -4,12 +4,13 @@
use crate::fold::{FallibleTypeFolder, TypeFoldable};
use crate::visit::{TypeVisitable, TypeVisitor};
-use crate::{ConstKind, FloatTy, IntTy, Interner, UintTy};
+use crate::{ConstKind, FloatTy, InferTy, IntTy, Interner, UintTy, UniverseIndex};
use rustc_data_structures::functor::IdFunctor;
use rustc_data_structures::sync::Lrc;
use rustc_index::{Idx, IndexVec};
use core::fmt;
+use std::marker::PhantomData;
use std::ops::ControlFlow;
///////////////////////////////////////////////////////////////////////////
@@ -22,7 +23,6 @@ TrivialTypeTraversalImpls! {
(),
bool,
usize,
- u8,
u16,
u32,
u64,
@@ -165,6 +165,116 @@ impl<I: Interner, T: TypeVisitable<I>, Ix: Idx> TypeVisitable<I> for IndexVec<Ix
}
}
+///////////////////////////////////////////////////
+// Debug impls
+
+pub trait InferCtxtLike<I: Interner> {
+ fn universe_of_ty(&self, ty: I::InferTy) -> Option<UniverseIndex>;
+ fn universe_of_lt(&self, lt: I::RegionVid) -> Option<UniverseIndex>;
+ fn universe_of_ct(&self, ct: I::InferConst) -> Option<UniverseIndex>;
+}
+
+impl<I: Interner> InferCtxtLike<I> for core::convert::Infallible {
+ fn universe_of_ty(&self, _ty: <I as Interner>::InferTy) -> Option<UniverseIndex> {
+ match *self {}
+ }
+ fn universe_of_ct(&self, _ct: <I as Interner>::InferConst) -> Option<UniverseIndex> {
+ match *self {}
+ }
+ fn universe_of_lt(&self, _lt: <I as Interner>::RegionVid) -> Option<UniverseIndex> {
+ match *self {}
+ }
+}
+
+pub trait DebugWithInfcx<I: Interner>: fmt::Debug {
+ fn fmt<InfCtx: InferCtxtLike<I>>(
+ this: OptWithInfcx<'_, I, InfCtx, &Self>,
+ f: &mut fmt::Formatter<'_>,
+ ) -> fmt::Result;
+}
+
+impl<I: Interner, T: DebugWithInfcx<I> + ?Sized> DebugWithInfcx<I> for &'_ T {
+ fn fmt<InfCtx: InferCtxtLike<I>>(
+ this: OptWithInfcx<'_, I, InfCtx, &Self>,
+ f: &mut fmt::Formatter<'_>,
+ ) -> fmt::Result {
+ <T as DebugWithInfcx<I>>::fmt(this.map(|&data| data), f)
+ }
+}
+impl<I: Interner, T: DebugWithInfcx<I>> DebugWithInfcx<I> for [T] {
+ fn fmt<InfCtx: InferCtxtLike<I>>(
+ this: OptWithInfcx<'_, I, InfCtx, &Self>,
+ f: &mut fmt::Formatter<'_>,
+ ) -> fmt::Result {
+ match f.alternate() {
+ true => {
+ write!(f, "[\n")?;
+ for element in this.data.iter() {
+ write!(f, "{:?},\n", &this.wrap(element))?;
+ }
+ write!(f, "]")
+ }
+ false => {
+ write!(f, "[")?;
+ if this.data.len() > 0 {
+ for element in &this.data[..(this.data.len() - 1)] {
+ write!(f, "{:?}, ", &this.wrap(element))?;
+ }
+ if let Some(element) = this.data.last() {
+ write!(f, "{:?}", &this.wrap(element))?;
+ }
+ }
+ write!(f, "]")
+ }
+ }
+ }
+}
+
+pub struct OptWithInfcx<'a, I: Interner, InfCtx: InferCtxtLike<I>, T> {
+ pub data: T,
+ pub infcx: Option<&'a InfCtx>,
+ _interner: PhantomData<I>,
+}
+
+impl<I: Interner, InfCtx: InferCtxtLike<I>, T: Copy> Copy for OptWithInfcx<'_, I, InfCtx, T> {}
+impl<I: Interner, InfCtx: InferCtxtLike<I>, T: Clone> Clone for OptWithInfcx<'_, I, InfCtx, T> {
+ fn clone(&self) -> Self {
+ Self { data: self.data.clone(), infcx: self.infcx, _interner: self._interner }
+ }
+}
+
+impl<'a, I: Interner, T> OptWithInfcx<'a, I, core::convert::Infallible, T> {
+ pub fn new_no_ctx(data: T) -> Self {
+ Self { data, infcx: None, _interner: PhantomData }
+ }
+}
+
+impl<'a, I: Interner, InfCtx: InferCtxtLike<I>, T> OptWithInfcx<'a, I, InfCtx, T> {
+ pub fn new(data: T, infcx: &'a InfCtx) -> Self {
+ Self { data, infcx: Some(infcx), _interner: PhantomData }
+ }
+
+ pub fn wrap<U>(self, u: U) -> OptWithInfcx<'a, I, InfCtx, U> {
+ OptWithInfcx { data: u, infcx: self.infcx, _interner: PhantomData }
+ }
+
+ pub fn map<U>(self, f: impl FnOnce(T) -> U) -> OptWithInfcx<'a, I, InfCtx, U> {
+ OptWithInfcx { data: f(self.data), infcx: self.infcx, _interner: PhantomData }
+ }
+
+ pub fn as_ref(&self) -> OptWithInfcx<'a, I, InfCtx, &T> {
+ OptWithInfcx { data: &self.data, infcx: self.infcx, _interner: PhantomData }
+ }
+}
+
+impl<I: Interner, InfCtx: InferCtxtLike<I>, T: DebugWithInfcx<I>> fmt::Debug
+ for OptWithInfcx<'_, I, InfCtx, T>
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ DebugWithInfcx::fmt(self.as_ref(), f)
+ }
+}
+
impl fmt::Debug for IntTy {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.name_str())
@@ -183,20 +293,60 @@ impl fmt::Debug for FloatTy {
}
}
+impl fmt::Debug for InferTy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use InferTy::*;
+ match *self {
+ TyVar(ref v) => v.fmt(f),
+ IntVar(ref v) => v.fmt(f),
+ FloatVar(ref v) => v.fmt(f),
+ FreshTy(v) => write!(f, "FreshTy({v:?})"),
+ FreshIntTy(v) => write!(f, "FreshIntTy({v:?})"),
+ FreshFloatTy(v) => write!(f, "FreshFloatTy({v:?})"),
+ }
+ }
+}
+impl<I: Interner<InferTy = InferTy>> DebugWithInfcx<I> for InferTy {
+ fn fmt<InfCtx: InferCtxtLike<I>>(
+ this: OptWithInfcx<'_, I, InfCtx, &Self>,
+ f: &mut fmt::Formatter<'_>,
+ ) -> fmt::Result {
+ use InferTy::*;
+ match this.infcx.and_then(|infcx| infcx.universe_of_ty(*this.data)) {
+ None => write!(f, "{:?}", this.data),
+ Some(universe) => match *this.data {
+ TyVar(ty_vid) => write!(f, "?{}_{}t", ty_vid.index(), universe.index()),
+ IntVar(_) | FloatVar(_) | FreshTy(_) | FreshIntTy(_) | FreshFloatTy(_) => {
+ unreachable!()
+ }
+ },
+ }
+ }
+}
+
impl<I: Interner> fmt::Debug for ConstKind<I> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ OptWithInfcx::new_no_ctx(self).fmt(f)
+ }
+}
+impl<I: Interner> DebugWithInfcx<I> for ConstKind<I> {
+ fn fmt<InfCtx: InferCtxtLike<I>>(
+ this: OptWithInfcx<'_, I, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
use ConstKind::*;
- match self {
+
+ match this.data {
Param(param) => write!(f, "{param:?}"),
- Infer(var) => write!(f, "{var:?}"),
+ Infer(var) => write!(f, "{:?}", &this.wrap(var)),
Bound(debruijn, var) => crate::debug_bound_var(f, *debruijn, var.clone()),
Placeholder(placeholder) => write!(f, "{placeholder:?}"),
Unevaluated(uv) => {
- write!(f, "{uv:?}")
+ write!(f, "{:?}", &this.wrap(uv))
}
Value(valtree) => write!(f, "{valtree:?}"),
Error(_) => write!(f, "{{const error}}"),
- Expr(expr) => write!(f, "{expr:?}"),
+ Expr(expr) => write!(f, "{:?}", &this.wrap(expr)),
}
}
}
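
The new `DebugWithInfcx`/`OptWithInfcx`/`InferCtxtLike` items above boil down to "Debug with optional extra context": a wrapper pairs a value with an optional context, and its plain `fmt::Debug` impl forwards to a context-aware trait method, so inference variables can print their universe when a context is around and fall back to the bare `Debug` output otherwise. A self-contained sketch of the same shape, with invented names (`Ctx`, `WithCtx`, `DebugWith`, `Var`):

    use std::fmt;

    // Hypothetical context that can resolve variable indices to names; it stands
    // in for the inference context (`InferCtxtLike`) of the patch.
    struct Ctx {
        names: Vec<&'static str>,
    }

    // Context-aware debug trait, analogous to `DebugWithInfcx`: the value arrives
    // wrapped together with an optional context.
    trait DebugWith: fmt::Debug {
        fn fmt_with(this: &WithCtx<'_, &Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result;
    }

    // Analogous to `OptWithInfcx`: the data plus an optional borrowed context.
    struct WithCtx<'a, T> {
        data: T,
        ctx: Option<&'a Ctx>,
    }

    impl<'a, T> WithCtx<'a, T> {
        fn new(data: T, ctx: &'a Ctx) -> Self {
            WithCtx { data, ctx: Some(ctx) }
        }
        fn no_ctx(data: T) -> Self {
            WithCtx { data, ctx: None }
        }
        fn as_ref(&self) -> WithCtx<'_, &T> {
            WithCtx { data: &self.data, ctx: self.ctx }
        }
    }

    // Plain `Debug` defers to the context-aware formatting, just as the patch's
    // `OptWithInfcx` `Debug` impl calls `DebugWithInfcx::fmt`.
    impl<T: DebugWith> fmt::Debug for WithCtx<'_, T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            DebugWith::fmt_with(&self.as_ref(), f)
        }
    }

    // A toy "inference variable": prints a resolved name when a context is
    // available and falls back to its raw index otherwise.
    #[derive(Debug)]
    struct Var(usize);

    impl DebugWith for Var {
        fn fmt_with(this: &WithCtx<'_, &Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match this.ctx.and_then(|ctx| ctx.names.get(this.data.0)) {
                Some(name) => write!(f, "?{name}"),
                None => write!(f, "?{}", this.data.0),
            }
        }
    }

    fn main() {
        let ctx = Ctx { names: vec!["T", "U"] };
        println!("{:?}", WithCtx::no_ctx(Var(1)));    // prints `?1`
        println!("{:?}", WithCtx::new(Var(1), &ctx)); // prints `?U`
    }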
diff --git a/compiler/rustc_type_ir/src/sty.rs b/compiler/rustc_type_ir/src/sty.rs
index b696f9b9b..72bd50ace 100644
--- a/compiler/rustc_type_ir/src/sty.rs
+++ b/compiler/rustc_type_ir/src/sty.rs
@@ -3,7 +3,6 @@
use std::cmp::Ordering;
use std::{fmt, hash};
-use crate::DebruijnIndex;
use crate::FloatTy;
use crate::HashStableContext;
use crate::IntTy;
@@ -11,6 +10,7 @@ use crate::Interner;
use crate::TyDecoder;
use crate::TyEncoder;
use crate::UintTy;
+use crate::{DebruijnIndex, DebugWithInfcx, InferCtxtLike, OptWithInfcx};
use self::RegionKind::*;
use self::TyKind::*;
@@ -39,6 +39,7 @@ pub enum AliasKind {
/// A projection `<Type as Trait>::AssocType`.
/// Can get normalized away if monomorphic enough.
Projection,
+ /// An associated type in an inherent `impl`
Inherent,
/// An opaque type (usually from `impl Trait` in type aliases or function return types)
/// Can only be normalized away in RevealAll mode
@@ -74,11 +75,11 @@ pub enum TyKind<I: Interner> {
/// Algebraic data types (ADT). For example: structures, enumerations and unions.
///
/// For example, the type `List<i32>` would be represented using the `AdtDef`
- /// for `struct List<T>` and the substs `[i32]`.
+ /// for `struct List<T>` and the args `[i32]`.
///
/// Note that generic parameters in fields only get lazily substituted
- /// by using something like `adt_def.all_fields().map(|field| field.ty(tcx, substs))`.
- Adt(I::AdtDef, I::SubstsRef),
+ /// by using something like `adt_def.all_fields().map(|field| field.ty(tcx, args))`.
+ Adt(I::AdtDef, I::GenericArgsRef),
/// An unsized FFI type that is opaque to Rust. Written as `extern type T`.
Foreign(I::DefId),
@@ -110,7 +111,7 @@ pub enum TyKind<I: Interner> {
/// fn foo() -> i32 { 1 }
/// let bar = foo; // bar: fn() -> i32 {foo}
/// ```
- FnDef(I::DefId, I::SubstsRef),
+ FnDef(I::DefId, I::GenericArgsRef),
/// A pointer to a function. Written as `fn() -> i32`.
///
@@ -130,20 +131,20 @@ pub enum TyKind<I: Interner> {
/// The anonymous type of a closure. Used to represent the type of `|a| a`.
///
- /// Closure substs contain both the - potentially substituted - generic parameters
+ /// Closure args contain both the - potentially substituted - generic parameters
/// of its parent and some synthetic parameters. See the documentation for
- /// `ClosureSubsts` for more details.
- Closure(I::DefId, I::SubstsRef),
+ /// `ClosureArgs` for more details.
+ Closure(I::DefId, I::GenericArgsRef),
/// The anonymous type of a generator. Used to represent the type of
/// `|a| yield a`.
///
- /// For more info about generator substs, visit the documentation for
- /// `GeneratorSubsts`.
- Generator(I::DefId, I::SubstsRef, I::Movability),
+ /// For more info about generator args, visit the documentation for
+ /// `GeneratorArgs`.
+ Generator(I::DefId, I::GenericArgsRef, I::Movability),
/// A type representing the types stored inside a generator.
- /// This should only appear as part of the `GeneratorSubsts`.
+ /// This should only appear as part of the `GeneratorArgs`.
///
/// Note that the captured variables for generators are stored separately
/// using a tuple in the same way as for closures.
@@ -168,7 +169,7 @@ pub enum TyKind<I: Interner> {
GeneratorWitness(I::BinderListTy),
/// A type representing the types stored inside a generator.
- /// This should only appear as part of the `GeneratorSubsts`.
+ /// This should only appear as part of the `GeneratorArgs`.
///
/// Unlike upvars, the witness can reference lifetimes from
/// inside of the generator itself. To deal with them in
@@ -176,7 +177,7 @@ pub enum TyKind<I: Interner> {
/// lifetimes bound by the witness itself.
///
    /// This variant is only used when `drop_tracking_mir` is set.
- /// This contains the `DefId` and the `SubstsRef` of the generator.
+ /// This contains the `DefId` and the `GenericArgsRef` of the generator.
/// The actual witness types are computed on MIR by the `mir_generator_witnesses` query.
///
/// Looking at the following example, the witness for this generator
@@ -191,7 +192,7 @@ pub enum TyKind<I: Interner> {
/// }
/// # ;
/// ```
- GeneratorWitnessMIR(I::DefId, I::SubstsRef),
+ GeneratorWitnessMIR(I::DefId, I::GenericArgsRef),
/// The never type `!`.
Never,
@@ -199,7 +200,9 @@ pub enum TyKind<I: Interner> {
/// A tuple type. For example, `(i32, bool)`.
Tuple(I::ListTy),
- /// A projection or opaque type. Both of these types
+ /// A projection, opaque type, weak type alias, or inherent associated type.
+ /// All of these types are represented as pairs of def-id and args, and can
+ /// be normalized, so they are grouped conceptually.
Alias(AliasKind, I::AliasTy),
/// A type parameter; for example, `T` in `fn f<T>(x: T) {}`.
@@ -503,42 +506,48 @@ impl<I: Interner> hash::Hash for TyKind<I> {
}
}
-// This is manually implemented because a derive would require `I: Debug`
-impl<I: Interner> fmt::Debug for TyKind<I> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
+impl<I: Interner> DebugWithInfcx<I> for TyKind<I> {
+ fn fmt<InfCtx: InferCtxtLike<I>>(
+ this: OptWithInfcx<'_, I, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> fmt::Result {
+ match this.data {
Bool => write!(f, "bool"),
Char => write!(f, "char"),
Int(i) => write!(f, "{i:?}"),
Uint(u) => write!(f, "{u:?}"),
Float(float) => write!(f, "{float:?}"),
- Adt(d, s) => f.debug_tuple_field2_finish("Adt", d, s),
+ Adt(d, s) => f.debug_tuple_field2_finish("Adt", d, &this.wrap(s)),
Foreign(d) => f.debug_tuple_field1_finish("Foreign", d),
Str => write!(f, "str"),
- Array(t, c) => write!(f, "[{t:?}; {c:?}]"),
- Slice(t) => write!(f, "[{t:?}]"),
+ Array(t, c) => write!(f, "[{:?}; {:?}]", &this.wrap(t), &this.wrap(c)),
+ Slice(t) => write!(f, "[{:?}]", &this.wrap(t)),
RawPtr(p) => {
let (ty, mutbl) = I::ty_and_mut_to_parts(p.clone());
match I::mutability_is_mut(mutbl) {
true => write!(f, "*mut "),
false => write!(f, "*const "),
}?;
- write!(f, "{ty:?}")
+ write!(f, "{:?}", &this.wrap(ty))
}
Ref(r, t, m) => match I::mutability_is_mut(m.clone()) {
- true => write!(f, "&{r:?} mut {t:?}"),
- false => write!(f, "&{r:?} {t:?}"),
+ true => write!(f, "&{:?} mut {:?}", &this.wrap(r), &this.wrap(t)),
+ false => write!(f, "&{:?} {:?}", &this.wrap(r), &this.wrap(t)),
},
- FnDef(d, s) => f.debug_tuple_field2_finish("FnDef", d, s),
- FnPtr(s) => write!(f, "{s:?}"),
+ FnDef(d, s) => f.debug_tuple_field2_finish("FnDef", d, &this.wrap(s)),
+ FnPtr(s) => write!(f, "{:?}", &this.wrap(s)),
Dynamic(p, r, repr) => match repr {
- DynKind::Dyn => write!(f, "dyn {p:?} + {r:?}"),
- DynKind::DynStar => write!(f, "dyn* {p:?} + {r:?}"),
+ DynKind::Dyn => write!(f, "dyn {:?} + {:?}", &this.wrap(p), &this.wrap(r)),
+ DynKind::DynStar => {
+ write!(f, "dyn* {:?} + {:?}", &this.wrap(p), &this.wrap(r))
+ }
},
- Closure(d, s) => f.debug_tuple_field2_finish("Closure", d, s),
- Generator(d, s, m) => f.debug_tuple_field3_finish("Generator", d, s, m),
- GeneratorWitness(g) => f.debug_tuple_field1_finish("GeneratorWitness", g),
- GeneratorWitnessMIR(d, s) => f.debug_tuple_field2_finish("GeneratorWitnessMIR", d, s),
+ Closure(d, s) => f.debug_tuple_field2_finish("Closure", d, &this.wrap(s)),
+ Generator(d, s, m) => f.debug_tuple_field3_finish("Generator", d, &this.wrap(s), m),
+ GeneratorWitness(g) => f.debug_tuple_field1_finish("GeneratorWitness", &this.wrap(g)),
+ GeneratorWitnessMIR(d, s) => {
+ f.debug_tuple_field2_finish("GeneratorWitnessMIR", d, &this.wrap(s))
+ }
Never => write!(f, "!"),
Tuple(t) => {
let mut iter = t.clone().into_iter();
@@ -547,35 +556,41 @@ impl<I: Interner> fmt::Debug for TyKind<I> {
match iter.next() {
None => return write!(f, ")"),
- Some(ty) => write!(f, "{ty:?}")?,
+ Some(ty) => write!(f, "{:?}", &this.wrap(ty))?,
};
match iter.next() {
None => return write!(f, ",)"),
- Some(ty) => write!(f, "{ty:?})")?,
+ Some(ty) => write!(f, "{:?})", &this.wrap(ty))?,
}
for ty in iter {
- write!(f, ", {ty:?}")?;
+ write!(f, ", {:?}", &this.wrap(ty))?;
}
write!(f, ")")
}
- Alias(i, a) => f.debug_tuple_field2_finish("Alias", i, a),
+ Alias(i, a) => f.debug_tuple_field2_finish("Alias", i, &this.wrap(a)),
Param(p) => write!(f, "{p:?}"),
Bound(d, b) => crate::debug_bound_var(f, *d, b),
Placeholder(p) => write!(f, "{p:?}"),
- Infer(t) => write!(f, "{t:?}"),
+ Infer(t) => write!(f, "{:?}", this.wrap(t)),
TyKind::Error(_) => write!(f, "{{type error}}"),
}
}
}
+// This is manually implemented because a derive would require `I: Debug`
+impl<I: Interner> fmt::Debug for TyKind<I> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ OptWithInfcx::new_no_ctx(self).fmt(f)
+ }
+}
// This is manually implemented because a derive would require `I: Encodable`
impl<I: Interner, E: TyEncoder> Encodable<E> for TyKind<I>
where
I::ErrorGuaranteed: Encodable<E>,
I::AdtDef: Encodable<E>,
- I::SubstsRef: Encodable<E>,
+ I::GenericArgsRef: Encodable<E>,
I::DefId: Encodable<E>,
I::Ty: Encodable<E>,
I::Const: Encodable<E>,
@@ -609,9 +624,9 @@ where
Float(f) => e.emit_enum_variant(disc, |e| {
f.encode(e);
}),
- Adt(adt, substs) => e.emit_enum_variant(disc, |e| {
+ Adt(adt, args) => e.emit_enum_variant(disc, |e| {
adt.encode(e);
- substs.encode(e);
+ args.encode(e);
}),
Foreign(def_id) => e.emit_enum_variant(disc, |e| {
def_id.encode(e);
@@ -632,9 +647,9 @@ where
t.encode(e);
m.encode(e);
}),
- FnDef(def_id, substs) => e.emit_enum_variant(disc, |e| {
+ FnDef(def_id, args) => e.emit_enum_variant(disc, |e| {
def_id.encode(e);
- substs.encode(e);
+ args.encode(e);
}),
FnPtr(polyfnsig) => e.emit_enum_variant(disc, |e| {
polyfnsig.encode(e);
@@ -644,25 +659,25 @@ where
r.encode(e);
repr.encode(e);
}),
- Closure(def_id, substs) => e.emit_enum_variant(disc, |e| {
+ Closure(def_id, args) => e.emit_enum_variant(disc, |e| {
def_id.encode(e);
- substs.encode(e);
+ args.encode(e);
}),
- Generator(def_id, substs, m) => e.emit_enum_variant(disc, |e| {
+ Generator(def_id, args, m) => e.emit_enum_variant(disc, |e| {
def_id.encode(e);
- substs.encode(e);
+ args.encode(e);
m.encode(e);
}),
GeneratorWitness(b) => e.emit_enum_variant(disc, |e| {
b.encode(e);
}),
- GeneratorWitnessMIR(def_id, substs) => e.emit_enum_variant(disc, |e| {
+ GeneratorWitnessMIR(def_id, args) => e.emit_enum_variant(disc, |e| {
def_id.encode(e);
- substs.encode(e);
+ args.encode(e);
}),
Never => e.emit_enum_variant(disc, |_| {}),
- Tuple(substs) => e.emit_enum_variant(disc, |e| {
- substs.encode(e);
+ Tuple(args) => e.emit_enum_variant(disc, |e| {
+ args.encode(e);
}),
Alias(k, p) => e.emit_enum_variant(disc, |e| {
k.encode(e);
@@ -693,7 +708,7 @@ impl<I: Interner, D: TyDecoder<I = I>> Decodable<D> for TyKind<I>
where
I::ErrorGuaranteed: Decodable<D>,
I::AdtDef: Decodable<D>,
- I::SubstsRef: Decodable<D>,
+ I::GenericArgsRef: Decodable<D>,
I::DefId: Decodable<D>,
I::Ty: Decodable<D>,
I::Const: Decodable<D>,
@@ -760,7 +775,7 @@ impl<CTX: HashStableContext, I: Interner> HashStable<CTX> for TyKind<I>
where
I::AdtDef: HashStable<CTX>,
I::DefId: HashStable<CTX>,
- I::SubstsRef: HashStable<CTX>,
+ I::GenericArgsRef: HashStable<CTX>,
I::Ty: HashStable<CTX>,
I::Const: HashStable<CTX>,
I::TypeAndMut: HashStable<CTX>,
@@ -797,9 +812,9 @@ where
Float(f) => {
f.hash_stable(__hcx, __hasher);
}
- Adt(adt, substs) => {
+ Adt(adt, args) => {
adt.hash_stable(__hcx, __hasher);
- substs.hash_stable(__hcx, __hasher);
+ args.hash_stable(__hcx, __hasher);
}
Foreign(def_id) => {
def_id.hash_stable(__hcx, __hasher);
@@ -820,9 +835,9 @@ where
t.hash_stable(__hcx, __hasher);
m.hash_stable(__hcx, __hasher);
}
- FnDef(def_id, substs) => {
+ FnDef(def_id, args) => {
def_id.hash_stable(__hcx, __hasher);
- substs.hash_stable(__hcx, __hasher);
+ args.hash_stable(__hcx, __hasher);
}
FnPtr(polyfnsig) => {
polyfnsig.hash_stable(__hcx, __hasher);
@@ -832,25 +847,25 @@ where
r.hash_stable(__hcx, __hasher);
repr.hash_stable(__hcx, __hasher);
}
- Closure(def_id, substs) => {
+ Closure(def_id, args) => {
def_id.hash_stable(__hcx, __hasher);
- substs.hash_stable(__hcx, __hasher);
+ args.hash_stable(__hcx, __hasher);
}
- Generator(def_id, substs, m) => {
+ Generator(def_id, args, m) => {
def_id.hash_stable(__hcx, __hasher);
- substs.hash_stable(__hcx, __hasher);
+ args.hash_stable(__hcx, __hasher);
m.hash_stable(__hcx, __hasher);
}
GeneratorWitness(b) => {
b.hash_stable(__hcx, __hasher);
}
- GeneratorWitnessMIR(def_id, substs) => {
+ GeneratorWitnessMIR(def_id, args) => {
def_id.hash_stable(__hcx, __hasher);
- substs.hash_stable(__hcx, __hasher);
+ args.hash_stable(__hcx, __hasher);
}
Never => {}
- Tuple(substs) => {
- substs.hash_stable(__hcx, __hasher);
+ Tuple(args) => {
+ args.hash_stable(__hcx, __hasher);
}
Alias(k, p) => {
k.hash_stable(__hcx, __hasher);
@@ -1155,7 +1170,7 @@ impl<I: Interner> Clone for ConstKind<I> {
/// These are regions that are stored behind a binder and must be substituted
/// with some concrete region before being used. There are two kinds of
/// bound regions: early-bound, which are bound in an item's `Generics`,
-/// and are substituted by an `InternalSubsts`, and late-bound, which are part of
+/// and are substituted by a `GenericArgs`, and late-bound, which are part of
/// higher-ranked types (e.g., `for<'a> fn(&'a ())`), and are substituted by
/// the likes of `liberate_late_bound_regions`. The distinction exists
/// because higher-ranked lifetimes aren't supported in all places. See [1][2].
@@ -1356,21 +1371,23 @@ impl<I: Interner> hash::Hash for RegionKind<I> {
}
}
-// This is manually implemented because a derive would require `I: Debug`
-impl<I: Interner> fmt::Debug for RegionKind<I> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
+impl<I: Interner> DebugWithInfcx<I> for RegionKind<I> {
+ fn fmt<InfCtx: InferCtxtLike<I>>(
+ this: OptWithInfcx<'_, I, InfCtx, &Self>,
+ f: &mut core::fmt::Formatter<'_>,
+ ) -> core::fmt::Result {
+ match this.data {
ReEarlyBound(data) => write!(f, "ReEarlyBound({data:?})"),
ReLateBound(binder_id, bound_region) => {
write!(f, "ReLateBound({binder_id:?}, {bound_region:?})")
}
- ReFree(fr) => fr.fmt(f),
+ ReFree(fr) => write!(f, "{fr:?}"),
ReStatic => f.write_str("ReStatic"),
- ReVar(vid) => vid.fmt(f),
+ ReVar(vid) => write!(f, "{:?}", &this.wrap(vid)),
RePlaceholder(placeholder) => write!(f, "RePlaceholder({placeholder:?})"),
@@ -1380,6 +1397,11 @@ impl<I: Interner> fmt::Debug for RegionKind<I> {
}
}
}
+impl<I: Interner> fmt::Debug for RegionKind<I> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ OptWithInfcx::new_no_ctx(self).fmt(f)
+ }
+}
// This is manually implemented because a derive would require `I: Encodable`
impl<I: Interner, E: TyEncoder> Encodable<E> for RegionKind<I>